Include needed Python result files

This commit is contained in:
Michael Hohn
2021-11-09 11:46:14 -08:00
committed by Michael Hohn
commit d180a079b0
39 changed files with 23204 additions and 0 deletions

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# converted to Perl by Matthew Chapman, and later to Python by
# David Mosberger.
#
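# For reference, a sketch of the two `readelf -u` line shapes this script
# matches (illustrative addresses; exact formatting can vary between
# binutils versions):
#
#   <my_func>: [0x4000000000001000-0x4000000000001040]
#       ...rlen=24
#
# The first form is consumed by start_pattern below, the second by
# rlen_pattern.
#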
import os
import re
import sys
if len(sys.argv) != 2:
print("Usage: %s FILE" % sys.argv[0])
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(r".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = int(m.group(2), 16)
end = int(m.group(3), 16)
slots = 3 * (end - start) // 16  # integer division: each 16-byte ia64 bundle holds 3 slots
rlen_sum = 0
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += int(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print("No errors detected in %u functions." % num_funcs)
else:
if num_errors > 1:
err="errors"
else:
err="error"
print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
sys.exit(1)

View File

@@ -0,0 +1,496 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
# This is simply to aid in creating the entries in the order of the value of
# the device-global NI signal/terminal constants defined in comedi.h
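#
# A typical invocation (both extractions may be requested at once):
#   ./convert_csv_to_c.py --route_values --device_routes
# This reads csv/route_values/*.csv and csv/device_routes/*.csv and writes the
# generated C sources under the 'c' output directory (see the classes below).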
import comedi_h
import os, sys, re
from csv_collection import CSVCollection
def c_to_o(filename, prefix='\t\t\t\t\t ni_routing/', suffix=' \\'):
if not filename.endswith('.c'):
return ''
return prefix + filename.rpartition('.c')[0] + '.o' + suffix
def routedict_to_structinit_single(name, D, return_name=False):
Locals = dict()
lines = [
'\t.family = "{}",'.format(name),
'\t.register_values = {',
'\t\t/*',
'\t\t * destination = {',
'\t\t * source = register value,',
'\t\t * ...',
'\t\t * }',
'\t\t */',
]
if (False):
# print table with index0:src, index1:dest
D0 = D # (src-> dest->reg_value)
#D1 : destD
else:
D0 = dict()
for src, destD in D.items():
for dest, val in destD.items():
D0.setdefault(dest, {})[src] = val
D0 = sorted(D0.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals))
for D0_sig, D1_D in D0:
D1 = sorted(D1_D.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals))
lines.append('\t\t[B({})] = {{'.format(D0_sig))
for D1_sig, value in D1:
if not re.match('[VIU]\([^)]*\)', value):
sys.stderr.write('Invalid register format: {}\n'.format(repr(value)))
sys.stderr.write(
'Register values should be formatted with V(),I(),or U()\n')
raise RuntimeError('Invalid register values format')
lines.append('\t\t\t[B({})]\t= {},'.format(D1_sig, value))
lines.append('\t\t},')
lines.append('\t},')
lines = '\n'.join(lines)
if return_name:
return name, lines
else:
return lines
def routedict_to_routelist_single(name, D, indent=1):
Locals = dict()
indents = dict(
I0 = '\t'*(indent),
I1 = '\t'*(indent+1),
I2 = '\t'*(indent+2),
I3 = '\t'*(indent+3),
I4 = '\t'*(indent+4),
)
if (False):
# data is src -> dest-list
D0 = D
keyname = 'src'
valname = 'dest'
else:
# data is dest -> src-list
keyname = 'dest'
valname = 'src'
D0 = dict()
for src, destD in D.items():
for dest, val in destD.items():
D0.setdefault(dest, {})[src] = val
# Sort by order of device-global names (numerically)
D0 = sorted(D0.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals))
lines = [ '{I0}.device = "{name}",\n'
'{I0}.routes = (struct ni_route_set[]){{'
.format(name=name, **indents) ]
for D0_sig, D1_D in D0:
D1 = [ k for k,v in D1_D.items() if v ]
D1.sort(key=lambda i: eval(i, comedi_h.__dict__, Locals))
lines.append('{I1}{{\n{I2}.{keyname} = {D0_sig},\n'
'{I2}.{valname} = (int[]){{'
.format(keyname=keyname, valname=valname, D0_sig=D0_sig, **indents)
)
for D1_sig in D1:
lines.append( '{I3}{D1_sig},'.format(D1_sig=D1_sig, **indents) )
lines.append( '{I3}0, /* Termination */'.format(**indents) )
lines.append('{I2}}}\n{I1}}},'.format(**indents))
lines.append('{I1}{{ /* Termination of list */\n{I2}.{keyname} = 0,\n{I1}}},'
.format(keyname=keyname, **indents))
lines.append('{I0}}},'.format(**indents))
return '\n'.join(lines)
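# For orientation, a sketch (hypothetical device and signal names) of the C
# fragment routedict_to_routelist_single() emits for one device:
#
#   .device = "pci-6070e",
#   .routes = (struct ni_route_set[]){
#           {
#                   .dest = NI_PFI(0),
#                   .src = (int[]){
#                           NI_AI_StartTrigger,
#                           0, /* Termination */
#                   }
#           },
#           { /* Termination of list */
#                   .dest = 0,
#           },
#   },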
class DeviceRoutes(CSVCollection):
MKFILE_SEGMENTS = 'device-route.mk'
SET_C = 'ni_device_routes.c'
ITEMS_DIR = 'ni_device_routes'
EXTERN_H = 'all.h'
OUTPUT_DIR = 'c'
output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <olsonse@umich.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "ni_device_routes.h"
#include "{extern_h}"\
""".format(filename=SET_C, extern_h=os.path.join(ITEMS_DIR, EXTERN_H))
extern_header = """\
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <olsonse@umich.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#ifndef _COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H
#define _COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H
#include "../ni_device_routes.h"
{externs}
#endif //_COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H
"""
single_output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <olsonse@umich.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "../ni_device_routes.h"
#include "{extern_h}"
struct ni_device_routes {table_name} = {{\
"""
def __init__(self, pattern='csv/device_routes/*.csv'):
super(DeviceRoutes,self).__init__(pattern)
def to_listinit(self):
chunks = [ self.output_file_top,
'',
'struct ni_device_routes *const ni_device_routes_list[] = {'
]
# put the sheets in lexical order of device numbers then bus
sheets = sorted(self.items(), key=lambda i : tuple(i[0].split('-')[::-1]) )
externs = []
objs = [c_to_o(self.SET_C)]
for sheet,D in sheets:
S = sheet.lower()
dev_table_name = 'ni_{}_device_routes'.format(S.replace('-','_'))
sheet_filename = os.path.join(self.ITEMS_DIR,'{}.c'.format(S))
externs.append('extern struct ni_device_routes {};'.format(dev_table_name))
chunks.append('\t&{},'.format(dev_table_name))
s_chunks = [
self.single_output_file_top.format(
filename = sheet_filename,
table_name = dev_table_name,
extern_h = self.EXTERN_H,
),
routedict_to_routelist_single(S, D),
'};',
]
objs.append(c_to_o(sheet_filename))
with open(os.path.join(self.OUTPUT_DIR, sheet_filename), 'w') as f:
f.write('\n'.join(s_chunks))
f.write('\n')
with open(os.path.join(self.OUTPUT_DIR, self.MKFILE_SEGMENTS), 'w') as f:
f.write('# This is the segment that should be included in comedi/drivers/Makefile\n')
f.write('ni_routing-objs\t\t\t\t+= \\\n')
f.write('\n'.join(objs))
f.write('\n')
EXTERN_H = os.path.join(self.ITEMS_DIR, self.EXTERN_H)
with open(os.path.join(self.OUTPUT_DIR, EXTERN_H), 'w') as f:
f.write(self.extern_header.format(
filename=EXTERN_H, externs='\n'.join(externs)))
chunks.append('\tNULL,') # terminate list
chunks.append('};')
return '\n'.join(chunks)
def save(self):
filename=os.path.join(self.OUTPUT_DIR, self.SET_C)
os.makedirs(os.path.join(self.OUTPUT_DIR, self.ITEMS_DIR), exist_ok=True)
with open(filename,'w') as f:
f.write( self.to_listinit() )
f.write( '\n' )
class RouteValues(CSVCollection):
MKFILE_SEGMENTS = 'route-values.mk'
SET_C = 'ni_route_values.c'
ITEMS_DIR = 'ni_route_values'
EXTERN_H = 'all.h'
OUTPUT_DIR = 'c'
output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/{filename}
* Route information for NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <olsonse@umich.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
 * This file includes the tables that are a list of all the values of various
 * signal routes available on NI hardware. In many cases, one does not
 * explicitly make these routes; rather, one might indicate that something is
* used as the source of one particular trigger or another (using
* *_src=TRIG_EXT).
*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#include "ni_route_values.h"
#include "{extern_h}"\
""".format(filename=SET_C, extern_h=os.path.join(ITEMS_DIR, EXTERN_H))
extern_header = """\
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <olsonse@umich.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* The contents of this file are generated using the tools in
* comedi/drivers/ni_routing/tools
*
* Please use those tools to help maintain the contents of this file.
*/
#ifndef _COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H
#define _COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H
#include "../ni_route_values.h"
{externs}
#endif //_COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H
"""
single_output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_routing/{filename}
* Route information for {sheet} boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2016 Spencer E. Olson <olsonse@umich.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
 * This file includes a list of all the values of various signal routes
 * available on this family of NI hardware. In many cases, one does not
 * explicitly make these routes; rather, one might indicate that something
 * is used as the source of one particular trigger or another (using
 * *_src=TRIG_EXT).
*
* The contents of this file can be generated using the tools in
* comedi/drivers/ni_routing/tools. This file also contains specific notes to
* this family of devices.
*
* Please use those tools to help maintain the contents of this file, but be
* mindful to not lose the notes already made in this file, since these notes
 * are critical to a complete understanding of the register values of this
* family.
*/
#include "../ni_route_values.h"
#include "{extern_h}"
const struct family_route_values {table_name} = {{\
"""
def __init__(self, pattern='csv/route_values/*.csv'):
super(RouteValues,self).__init__(pattern)
def to_structinit(self):
chunks = [ self.output_file_top,
'',
'const struct family_route_values *const ni_all_route_values[] = {'
]
# put the sheets in lexical order for consistency
sheets = sorted(self.items(), key=lambda i : i[0] )
externs = []
objs = [c_to_o(self.SET_C)]
for sheet,D in sheets:
S = sheet.lower()
fam_table_name = '{}_route_values'.format(S.replace('-','_'))
sheet_filename = os.path.join(self.ITEMS_DIR,'{}.c'.format(S))
externs.append('extern const struct family_route_values {};'.format(fam_table_name))
chunks.append('\t&{},'.format(fam_table_name))
s_chunks = [
self.single_output_file_top.format(
filename = sheet_filename,
sheet = sheet.upper(),
table_name = fam_table_name,
extern_h = self.EXTERN_H,
),
routedict_to_structinit_single(S, D),
'};',
]
objs.append(c_to_o(sheet_filename))
with open(os.path.join(self.OUTPUT_DIR, sheet_filename), 'w') as f:
f.write('\n'.join(s_chunks))
f.write( '\n' )
with open(os.path.join(self.OUTPUT_DIR, self.MKFILE_SEGMENTS), 'w') as f:
f.write('# This is the segment that should be included in comedi/drivers/Makefile\n')
f.write('ni_routing-objs\t\t\t\t+= \\\n')
f.write('\n'.join(objs))
f.write('\n')
EXTERN_H = os.path.join(self.ITEMS_DIR, self.EXTERN_H)
with open(os.path.join(self.OUTPUT_DIR, EXTERN_H), 'w') as f:
f.write(self.extern_header.format(
filename=EXTERN_H, externs='\n'.join(externs)))
chunks.append('\tNULL,') # terminate list
chunks.append('};')
return '\n'.join(chunks)
def save(self):
filename=os.path.join(self.OUTPUT_DIR, self.SET_C)
os.makedirs(os.path.join(self.OUTPUT_DIR, self.ITEMS_DIR), exist_ok=True)
with open(filename,'w') as f:
f.write( self.to_structinit() )
f.write( '\n' )
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument( '--route_values', action='store_true',
help='Extract route values from csv/route_values/*.csv' )
parser.add_argument( '--device_routes', action='store_true',
help='Extract route values from csv/device_routes/*.csv' )
args = parser.parse_args()
KL = list()
if args.route_values:
KL.append( RouteValues )
if args.device_routes:
KL.append( DeviceRoutes )
if not KL:
parser.error('nothing to do...')
for K in KL:
doc = K()
doc.save()

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
from os import path
import os, csv
from itertools import chain
from csv_collection import CSVCollection
from ni_names import value_to_name
import ni_values
CSV_DIR = 'csv'
def iter_src_values(D):
return D.items()
def iter_src(D):
for dest in D:
yield dest, 1
def create_csv(name, D, src_iter):
# have to change dest->{src:val} to src->{dest:val}
fieldnames = [value_to_name[i] for i in sorted(D.keys())]
fieldnames.insert(0, CSVCollection.source_column_name)
S = dict()
for dest, srcD in D.items():
for src,val in src_iter(srcD):
S.setdefault(src,{})[dest] = val
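# e.g. (hypothetical names) {'dest_a': {'src_x': 'V(1)'},
#                            'dest_b': {'src_x': 'V(2)'}}
# inverts to {'src_x': {'dest_a': 'V(1)', 'dest_b': 'V(2)'}}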
S = sorted(S.items(), key = lambda src_destD : src_destD[0])
csv_fname = path.join(CSV_DIR, name + '.csv')
with open(csv_fname, 'w') as F_csv:
dR = csv.DictWriter(F_csv, fieldnames, delimiter=';', quotechar='"')
dR.writeheader()
# now convert each source's mapping back into a csv row dictionary
rows = [
dict(chain(
((CSVCollection.source_column_name,value_to_name[src]),),
*(((value_to_name[dest],v),) for dest,v in destD.items())
))
for src, destD in S
]
dR.writerows(rows)
def to_csv():
for d in ['route_values', 'device_routes']:
os.makedirs(path.join(CSV_DIR, d), exist_ok=True)
for family, dst_src_map in ni_values.ni_route_values.items():
create_csv(path.join('route_values',family), dst_src_map, iter_src_values)
for device, dst_src_map in ni_values.ni_device_routes.items():
create_csv(path.join('device_routes',device), dst_src_map, iter_src)
if __name__ == '__main__':
to_csv()

View File

@@ -0,0 +1,39 @@
# SPDX-License-Identifier: GPL-2.0+
import os, csv, glob
class CSVCollection(dict):
"""
This class is a dictionary representation of the collection of sheets that
exist in a given .ODS file.
"""
delimiter = ';'
quotechar = '"'
source_column_name = 'Sources / Destinations'
def __init__(self, pattern, skip_commented_lines=True, strip_lines=True):
super(CSVCollection, self).__init__()
self.pattern = pattern
C = '#' if skip_commented_lines else 'blahblahblah'
if strip_lines:
strip = lambda s:s.strip()
else:
strip = lambda s:s
# load all CSV files
key = self.source_column_name
for fname in glob.glob(pattern):
with open(fname) as F:
dR = csv.DictReader(F, delimiter=self.delimiter,
quotechar=self.quotechar)
name = os.path.basename(fname).partition('.')[0]
D = {
r[key]:{f:strip(c) for f,c in r.items()
if f != key and f[:1] not in ['', C] and
strip(c)[:1] not in ['', C]}
for r in dR if r[key][:1] not in ['', C]
}
# now, go back through and eliminate all empty dictionaries
D = {k:v for k,v in D.items() if v}
self[name] = D
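# A usage sketch (hypothetical file name; real keys are NI signal names):
#
#   coll = CSVCollection('csv/route_values/*.csv')
#   coll['ni_660x']  # -> {row_signal: {column_signal: cell_value}, ...}
#
# Each matching CSV file becomes one entry keyed by its basename (sans
# extension); the column named by source_column_name supplies the row keys.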

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
from os import path
import os, csv
from csv_collection import CSVCollection
from ni_names import value_to_name
CSV_DIR = 'csv'
def to_csv():
os.makedirs(CSV_DIR, exist_ok=True)
csv_fname = path.join(CSV_DIR, 'blank_route_table.csv')
fieldnames = [sig for sig_val, sig in sorted(value_to_name.items())]
fieldnames.insert(0, CSVCollection.source_column_name)
with open(csv_fname, 'w') as F_csv:
dR = csv.DictWriter(F_csv, fieldnames, delimiter=';', quotechar='"')
dR.writeheader()
for sig in fieldnames[1:]:
dR.writerow({CSVCollection.source_column_name: sig})
if __name__ == '__main__':
to_csv()

View File

@@ -0,0 +1,734 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright (C) 2018-2019 Netronome Systems, Inc.
# Copyright (C) 2021 Isovalent, Inc.
# In case user attempts to run with Python 2.
from __future__ import print_function
import argparse
import re
import sys, os
class NoHelperFound(BaseException):
pass
class NoSyscallCommandFound(BaseException):
pass
class ParsingError(BaseException):
def __init__(self, line='<line not provided>', reader=None):
if reader:
BaseException.__init__(self,
'Error at file offset %d, parsing line: %s' %
(reader.tell(), line))
else:
BaseException.__init__(self, 'Error parsing line: %s' % line)
class APIElement(object):
"""
An object representing the description of an aspect of the eBPF API.
@proto: prototype of the API symbol
@desc: textual description of the symbol
@ret: (optional) description of any associated return value
"""
def __init__(self, proto='', desc='', ret=''):
self.proto = proto
self.desc = desc
self.ret = ret
class Helper(APIElement):
"""
An object representing the description of an eBPF helper function.
@proto: function prototype of the helper function
@desc: textual description of the helper function
@ret: description of the return value of the helper function
"""
def proto_break_down(self):
"""
Break down the helper function prototype into smaller chunks: return
type, name, distinct arguments.
"""
arg_re = re.compile('((\w+ )*?(\w+|...))( (\**)(\w+))?$')
res = {}
proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$')
capture = proto_re.match(self.proto)
res['ret_type'] = capture.group(1)
res['ret_star'] = capture.group(2)
res['name'] = capture.group(3)
res['args'] = []
args = capture.group(4).split(', ')
for a in args:
capture = arg_re.match(a)
res['args'].append({
'type' : capture.group(1),
'star' : capture.group(5),
'name' : capture.group(6)
})
return res
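# For illustration, a prototype such as
#   'void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)'
# breaks down to (a sketch of the returned dict):
#   {'ret_type': 'void', 'ret_star': '*', 'name': 'bpf_map_lookup_elem',
#    'args': [{'type': 'struct bpf_map', 'star': '*', 'name': 'map'},
#             {'type': 'const void', 'star': '*', 'name': 'key'}]}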
class HeaderParser(object):
"""
An object used to parse a file in order to extract the documentation of a
list of eBPF helper functions. All the helpers that can be retrieved are
stored as Helper objects in the self.helpers list.
@filename: name of file to parse, usually include/uapi/linux/bpf.h in the
kernel tree
"""
def __init__(self, filename):
self.reader = open(filename, 'r')
self.line = ''
self.helpers = []
self.commands = []
def parse_element(self):
proto = self.parse_symbol()
desc = self.parse_desc()
ret = self.parse_ret()
return APIElement(proto=proto, desc=desc, ret=ret)
def parse_helper(self):
proto = self.parse_proto()
desc = self.parse_desc()
ret = self.parse_ret()
return Helper(proto=proto, desc=desc, ret=ret)
def parse_symbol(self):
p = re.compile(' \* ?(.+)$')
capture = p.match(self.line)
if not capture:
raise NoSyscallCommandFound
end_re = re.compile(' \* ?NOTES$')
end = end_re.match(self.line)
if end:
raise NoSyscallCommandFound
self.line = self.reader.readline()
return capture.group(1)
def parse_proto(self):
# Argument can be of shape:
# - "void"
# - "type name"
# - "type *name"
# - Same as above, with "const" and/or "struct" in front of type
# - "..." (undefined number of arguments, for bpf_trace_printk())
# There is at least one term ("void"), and at most five arguments.
p = re.compile(' \* ?((.+) \**\w+\((((const )?(struct )?(\w+|\.\.\.)( \**\w+)?)(, )?){1,5}\))$')
capture = p.match(self.line)
if not capture:
raise NoHelperFound
self.line = self.reader.readline()
return capture.group(1)
def parse_desc(self):
p = re.compile(' \* ?(?:\t| {5,8})Description$')
capture = p.match(self.line)
if not capture:
# Helper can have empty description and we might be parsing another
# attribute: return but do not consume.
return ''
# Description can be several lines, some of them possibly empty, and it
# stops when another subsection title is met.
desc = ''
while True:
self.line = self.reader.readline()
if self.line == ' *\n':
desc += '\n'
else:
p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
capture = p.match(self.line)
if capture:
desc += capture.group(1) + '\n'
else:
break
return desc
def parse_ret(self):
p = re.compile(' \* ?(?:\t| {5,8})Return$')
capture = p.match(self.line)
if not capture:
# Helper can have empty retval and we might be parsing another
# attribute: return but do not consume.
return ''
# Return value description can be several lines, some of them possibly
# empty, and it stops when another subsection title is met.
ret = ''
while True:
self.line = self.reader.readline()
if self.line == ' *\n':
ret += '\n'
else:
p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
capture = p.match(self.line)
if capture:
ret += capture.group(1) + '\n'
else:
break
return ret
def seek_to(self, target, help_message):
self.reader.seek(0)
offset = self.reader.read().find(target)
if offset == -1:
raise Exception(help_message)
self.reader.seek(offset)
self.reader.readline()
self.reader.readline()
self.line = self.reader.readline()
def parse_syscall(self):
self.seek_to('* DOC: eBPF Syscall Commands',
'Could not find start of eBPF syscall descriptions list')
while True:
try:
command = self.parse_element()
self.commands.append(command)
except NoSyscallCommandFound:
break
def parse_helpers(self):
self.seek_to('* Start of BPF helper function descriptions:',
'Could not find start of eBPF helper descriptions list')
while True:
try:
helper = self.parse_helper()
self.helpers.append(helper)
except NoHelperFound:
break
def run(self):
self.parse_syscall()
self.parse_helpers()
self.reader.close()
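# A minimal usage sketch, mirroring what the bottom of this script does:
#
#   parser = HeaderParser('include/uapi/linux/bpf.h')
#   parser.run()
#   # parser.helpers and parser.commands now hold the parsed API elements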
###############################################################################
class Printer(object):
"""
A generic class for printers. Printers should be created with a
HeaderParser object, and implement a way to print its elements in the
desired fashion.
@parser: A HeaderParser with objects to print to standard output
"""
def __init__(self, parser):
self.parser = parser
self.elements = []
def print_header(self):
pass
def print_footer(self):
pass
def print_one(self, helper):
pass
def print_all(self):
self.print_header()
for elem in self.elements:
self.print_one(elem)
self.print_footer()
class PrinterRST(Printer):
"""
A generic class for printers that print ReStructured Text. Printers should
be created with a HeaderParser object, and implement a way to print API
elements in the desired fashion.
@parser: A HeaderParser with objects to print to standard output
"""
def __init__(self, parser):
self.parser = parser
def print_license(self):
license = '''\
.. Copyright (C) All BPF authors and contributors from 2014 to present.
.. See git log include/uapi/linux/bpf.h in kernel tree for details.
..
.. %%%LICENSE_START(VERBATIM)
.. Permission is granted to make and distribute verbatim copies of this
.. manual provided the copyright notice and this permission notice are
.. preserved on all copies.
..
.. Permission is granted to copy and distribute modified versions of this
.. manual under the conditions for verbatim copying, provided that the
.. entire resulting derived work is distributed under the terms of a
.. permission notice identical to this one.
..
.. Since the Linux kernel and libraries are constantly changing, this
.. manual page may be incorrect or out-of-date. The author(s) assume no
.. responsibility for errors or omissions, or for damages resulting from
.. the use of the information contained herein. The author(s) may not
.. have taken the same level of care in the production of this manual,
.. which is licensed free of charge, as they might when working
.. professionally.
..
.. Formatted or processed versions of this manual, if unaccompanied by
.. the source, must acknowledge the copyright and authors of this work.
.. %%%LICENSE_END
..
.. Please do not edit this file. It was generated from the documentation
.. located in file include/uapi/linux/bpf.h of the Linux kernel sources
.. (helpers description), and from scripts/bpf_doc.py in the same
.. repository (header and footer).
'''
print(license)
def print_elem(self, elem):
if (elem.desc):
print('\tDescription')
# Do not strip all newline characters: formatted code at the end of
# a section must be followed by a blank line.
for line in re.sub('\n$', '', elem.desc, count=1).split('\n'):
print('{}{}'.format('\t\t' if line else '', line))
if (elem.ret):
print('\tReturn')
for line in elem.ret.rstrip().split('\n'):
print('{}{}'.format('\t\t' if line else '', line))
print('')
class PrinterHelpersRST(PrinterRST):
"""
A printer for dumping collected information about helpers as a ReStructured
Text page compatible with the rst2man program, which can be used to
generate a manual page for the helpers.
@parser: A HeaderParser with Helper objects to print to standard output
"""
def __init__(self, parser):
self.elements = parser.helpers
def print_header(self):
header = '''\
===========
BPF-HELPERS
===========
-------------------------------------------------------------------------------
list of eBPF helper functions
-------------------------------------------------------------------------------
:Manual section: 7
DESCRIPTION
===========
The extended Berkeley Packet Filter (eBPF) subsystem consists of programs
written in a pseudo-assembly language, then attached to one of several
kernel hooks and run in reaction to specific events. This framework differs
from the older, "classic" BPF (or "cBPF") in several aspects, one of them being
the ability to call special functions (or "helpers") from within a program.
These functions are restricted to a white-list of helpers defined in the
kernel.
These helpers are used by eBPF programs to interact with the system, or with
the context in which they work. For instance, they can be used to print
debugging messages, to get the time since the system was booted, to interact
with eBPF maps, or to manipulate network packets. Since there are several eBPF
program types, and they do not run in the same context, each program type
can only call a subset of those helpers.
Due to eBPF conventions, a helper cannot have more than five arguments.
Internally, eBPF programs call directly into the compiled helper functions
without requiring any foreign-function interface. As a result, calling helpers
introduces no overhead, thus offering excellent performance.
This document is an attempt to list and document the helpers available to eBPF
developers. They are sorted in chronological order (the oldest helpers in the
kernel at the top).
HELPERS
=======
'''
PrinterRST.print_license(self)
print(header)
def print_footer(self):
footer = '''
EXAMPLES
========
Example usage for most of the eBPF helpers listed in this manual page is
available within the Linux kernel sources, at the following locations:
* *samples/bpf/*
* *tools/testing/selftests/bpf/*
LICENSE
=======
eBPF programs can have an associated license, passed along with the bytecode
instructions to the kernel when the programs are loaded. The format for that
string is identical to the one in use for kernel modules (Dual licenses, such
as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
programs that are compatible with the GNU General Public License (GPL).
In order to use such helpers, the eBPF program must be loaded with the correct
license string passed (via **attr**) to the **bpf**\ () system call, and this
generally translates into the C source code of the program containing a line
similar to the following:
::
char ____license[] __attribute__((section("license"), used)) = "GPL";
IMPLEMENTATION
==============
This manual page is an effort to document the existing eBPF helper functions.
But as of this writing, the BPF sub-system is under heavy development. New eBPF
program or map types are added, along with new helper functions. Some helpers
are occasionally made available for additional program types. So in spite of
the efforts of the community, this page might not be up-to-date. If you want to
check by yourself what helper functions exist in your kernel, or what types of
programs they can support, here are some files among the kernel tree that you
may be interested in:
* *include/uapi/linux/bpf.h* is the main BPF header. It contains the full list
of all helper functions, as well as many other BPF definitions including most
of the flags, structs or constants used by the helpers.
* *net/core/filter.c* contains the definition of most network-related helper
functions, and the list of program types from which they can be used.
* *kernel/trace/bpf_trace.c* is the equivalent for most tracing program-related
helpers.
* *kernel/bpf/verifier.c* contains the functions used to check that valid types
of eBPF maps are used with a given helper function.
* *kernel/bpf/* directory contains other files in which additional helpers are
defined (for cgroups, sockmaps, etc.).
* The bpftool utility can be used to probe the availability of helper functions
on the system (as well as supported program and map types, and a number of
other parameters). To do so, run **bpftool feature probe** (see
**bpftool-feature**\ (8) for details). Add the **unprivileged** keyword to
list features available to unprivileged users.
Compatibility between helper functions and program types can generally be found
in the files where helper functions are defined. Look for the **struct
bpf_func_proto** objects and for functions returning them: these functions
contain a list of helpers that a given program type can call. Note that the
**default:** label of the **switch ... case** used to filter helpers can call
other functions, themselves allowing access to additional helpers. The
requirement for GPL license is also in those **struct bpf_func_proto**.
Compatibility between helper functions and map types can be found in the
**check_map_func_compatibility**\ () function in file *kernel/bpf/verifier.c*.
Helper functions that invalidate the checks on **data** and **data_end**
pointers for network processing are listed in function
**bpf_helper_changes_pkt_data**\ () in file *net/core/filter.c*.
SEE ALSO
========
**bpf**\ (2),
**bpftool**\ (8),
**cgroups**\ (7),
**ip**\ (8),
**perf_event_open**\ (2),
**sendmsg**\ (2),
**socket**\ (7),
**tc-bpf**\ (8)'''
print(footer)
def print_proto(self, helper):
"""
Format the function prototype with bold and italics markers. This makes
the RST file less readable, but gives nice results in the manual page.
"""
proto = helper.proto_break_down()
print('**%s %s%s(' % (proto['ret_type'],
proto['ret_star'].replace('*', '\\*'),
proto['name']),
end='')
comma = ''
for a in proto['args']:
one_arg = '{}{}'.format(comma, a['type'])
if a['name']:
if a['star']:
one_arg += ' {}**\ '.format(a['star'].replace('*', '\\*'))
else:
one_arg += '** '
one_arg += '*{}*\\ **'.format(a['name'])
comma = ', '
print(one_arg, end='')
print(')**')
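# As a rough example, bpf_map_lookup_elem is rendered along the lines of
#   **void \*bpf_map_lookup_elem(struct bpf_map \***\ *map*\ **, const void \***\ *key*\ **)**
# i.e. types in bold and argument names in italics, with '*' escaped for RST.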
def print_one(self, helper):
self.print_proto(helper)
self.print_elem(helper)
class PrinterSyscallRST(PrinterRST):
"""
A printer for dumping collected information about the syscall API as a
ReStructured Text page compatible with the rst2man program, which can be
used to generate a manual page for the syscall.
@parser: A HeaderParser with APIElement objects to print to standard
output
"""
def __init__(self, parser):
self.elements = parser.commands
def print_header(self):
header = '''\
===
bpf
===
-------------------------------------------------------------------------------
Perform a command on an extended BPF object
-------------------------------------------------------------------------------
:Manual section: 2
COMMANDS
========
'''
PrinterRST.print_license(self)
print(header)
def print_one(self, command):
print('**%s**' % (command.proto))
self.print_elem(command)
class PrinterHelpers(Printer):
"""
A printer for dumping collected information about helpers as a C header to
be included from BPF programs.
@parser: A HeaderParser with Helper objects to print to standard output
"""
def __init__(self, parser):
self.elements = parser.helpers
type_fwds = [
'struct bpf_fib_lookup',
'struct bpf_sk_lookup',
'struct bpf_perf_event_data',
'struct bpf_perf_event_value',
'struct bpf_pidns_info',
'struct bpf_redir_neigh',
'struct bpf_sock',
'struct bpf_sock_addr',
'struct bpf_sock_ops',
'struct bpf_sock_tuple',
'struct bpf_spin_lock',
'struct bpf_sysctl',
'struct bpf_tcp_sock',
'struct bpf_tunnel_key',
'struct bpf_xfrm_state',
'struct linux_binprm',
'struct pt_regs',
'struct sk_reuseport_md',
'struct sockaddr',
'struct tcphdr',
'struct seq_file',
'struct tcp6_sock',
'struct tcp_sock',
'struct tcp_timewait_sock',
'struct tcp_request_sock',
'struct udp6_sock',
'struct task_struct',
'struct __sk_buff',
'struct sk_msg_md',
'struct xdp_md',
'struct path',
'struct btf_ptr',
'struct inode',
'struct socket',
'struct file',
'struct bpf_timer',
]
known_types = {
'...',
'void',
'const void',
'char',
'const char',
'int',
'long',
'unsigned long',
'__be16',
'__be32',
'__wsum',
'struct bpf_fib_lookup',
'struct bpf_perf_event_data',
'struct bpf_perf_event_value',
'struct bpf_pidns_info',
'struct bpf_redir_neigh',
'struct bpf_sk_lookup',
'struct bpf_sock',
'struct bpf_sock_addr',
'struct bpf_sock_ops',
'struct bpf_sock_tuple',
'struct bpf_spin_lock',
'struct bpf_sysctl',
'struct bpf_tcp_sock',
'struct bpf_tunnel_key',
'struct bpf_xfrm_state',
'struct linux_binprm',
'struct pt_regs',
'struct sk_reuseport_md',
'struct sockaddr',
'struct tcphdr',
'struct seq_file',
'struct tcp6_sock',
'struct tcp_sock',
'struct tcp_timewait_sock',
'struct tcp_request_sock',
'struct udp6_sock',
'struct task_struct',
'struct path',
'struct btf_ptr',
'struct inode',
'struct socket',
'struct file',
'struct bpf_timer',
}
mapped_types = {
'u8': '__u8',
'u16': '__u16',
'u32': '__u32',
'u64': '__u64',
's8': '__s8',
's16': '__s16',
's32': '__s32',
's64': '__s64',
'size_t': 'unsigned long',
'struct bpf_map': 'void',
'struct sk_buff': 'struct __sk_buff',
'const struct sk_buff': 'const struct __sk_buff',
'struct sk_msg_buff': 'struct sk_msg_md',
'struct xdp_buff': 'struct xdp_md',
}
# Helpers overloaded for different context types.
overloaded_helpers = [
'bpf_get_socket_cookie',
'bpf_sk_assign',
]
def print_header(self):
header = '''\
/* This is auto-generated file. See bpf_doc.py for details. */
/* Forward declarations of BPF structs */'''
print(header)
for fwd in self.type_fwds:
print('%s;' % fwd)
print('')
def print_footer(self):
footer = ''
print(footer)
def map_type(self, t):
if t in self.known_types:
return t
if t in self.mapped_types:
return self.mapped_types[t]
print("Unrecognized type '%s', please add it to known types!" % t,
file=sys.stderr)
sys.exit(1)
seen_helpers = set()
def print_one(self, helper):
proto = helper.proto_break_down()
if proto['name'] in self.seen_helpers:
return
self.seen_helpers.add(proto['name'])
print('/*')
print(" * %s" % proto['name'])
print(" *")
if (helper.desc):
# Do not strip all newline characters: formatted code at the end of
# a section must be followed by a blank line.
for line in re.sub('\n$', '', helper.desc, count=1).split('\n'):
print(' *{}{}'.format(' \t' if line else '', line))
if (helper.ret):
print(' *')
print(' * Returns')
for line in helper.ret.rstrip().split('\n'):
print(' *{}{}'.format(' \t' if line else '', line))
print(' */')
print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']),
proto['ret_star'], proto['name']), end='')
comma = ''
for i, a in enumerate(proto['args']):
t = a['type']
n = a['name']
if proto['name'] in self.overloaded_helpers and i == 0:
t = 'void'
n = 'ctx'
one_arg = '{}{}'.format(comma, self.map_type(t))
if n:
if a['star']:
one_arg += ' {}'.format(a['star'])
else:
one_arg += ' '
one_arg += '{}'.format(n)
comma = ', '
print(one_arg, end='')
print(') = (void *) %d;' % len(self.seen_helpers))
print('')
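# For reference, the first helper in bpf.h comes out of print_one() as
#
#   static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;
#
# ('struct bpf_map' is rewritten to 'void' via mapped_types, and the trailing
# number is the helper's position, taken from len(self.seen_helpers)).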
###############################################################################
# If the script is launched from scripts/ in the kernel tree and can access
# ../include/uapi/linux/bpf.h, use it as a default name for the file to parse,
# otherwise the --filename argument will be required from the command line.
script = os.path.abspath(sys.argv[0])
linuxRoot = os.path.dirname(os.path.dirname(script))
bpfh = os.path.join(linuxRoot, 'include/uapi/linux/bpf.h')
printers = {
'helpers': PrinterHelpersRST,
'syscall': PrinterSyscallRST,
}
argParser = argparse.ArgumentParser(description="""
Parse eBPF header file and generate documentation for the eBPF API.
The RST-formatted output produced can be turned into a manual page with the
rst2man utility.
""")
argParser.add_argument('--header', action='store_true',
help='generate C header file')
if (os.path.isfile(bpfh)):
argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h',
default=bpfh)
else:
argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h')
argParser.add_argument('target', nargs='?', default='helpers',
choices=printers.keys(), help='eBPF API target')
args = argParser.parse_args()
# Parse file.
headerParser = HeaderParser(args.filename)
headerParser.run()
# Print formatted output to standard output.
if args.header:
if args.target != 'helpers':
raise NotImplementedError('Only helpers header generation is supported')
printer = PrinterHelpers(headerParser)
else:
printer = printers[args.target](headerParser)
printer.print_all()

View File

@@ -0,0 +1,471 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
"""Find Kconfig symbols that are referenced but not defined."""
# (c) 2014-2017 Valentin Rothberg <valentinrothberg@gmail.com>
# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
#
import argparse
import difflib
import os
import re
import signal
import subprocess
import sys
from multiprocessing import Pool, cpu_count
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}"
DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*"
EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+"
DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
STMT = r"^\s*(?:if|select|imply|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")"
# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
REGEX_SYMBOL = re.compile(r'(?!\B)' + SYMBOL + r'(?!\B)')
REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
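# For illustration (hypothetical symbol names): in a Kconfig file,
#   "config HOTPLUG_PCI"        matches REGEX_KCONFIG_DEF (defines HOTPLUG_PCI)
#   "depends on PCI && HOTPLUG" matches REGEX_KCONFIG_STMT (references both)
# and in a C source file,
#   "#ifdef CONFIG_HOTPLUG_PCI" is picked up by REGEX_SOURCE_SYMBOL.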
def parse_options():
"""The user interface of this module."""
usage = "Run this tool to detect Kconfig symbols that are referenced but " \
"not defined in Kconfig. If no option is specified, " \
"checkkconfigsymbols defaults to check your current tree. " \
"Please note that specifying commits will 'git reset --hard\' " \
"your current tree! You may save uncommitted changes to avoid " \
"losing data."
parser = argparse.ArgumentParser(description=usage)
parser.add_argument('-c', '--commit', dest='commit', action='store',
default="",
help="check if the specified commit (hash) introduces "
"undefined Kconfig symbols")
parser.add_argument('-d', '--diff', dest='diff', action='store',
default="",
help="diff undefined symbols between two commits "
"(e.g., -d commmit1..commit2)")
parser.add_argument('-f', '--find', dest='find', action='store_true',
default=False,
help="find and show commits that may cause symbols to be "
"missing (required to run with --diff)")
parser.add_argument('-i', '--ignore', dest='ignore', action='store',
default="",
help="ignore files matching this Python regex "
"(e.g., -i '.*defconfig')")
parser.add_argument('-s', '--sim', dest='sim', action='store', default="",
help="print a list of max. 10 string-similar symbols")
parser.add_argument('--force', dest='force', action='store_true',
default=False,
help="reset current Git tree even when it's dirty")
parser.add_argument('--no-color', dest='color', action='store_false',
default=True,
help="don't print colored output (default when not "
"outputting to a terminal)")
args = parser.parse_args()
if args.commit and args.diff:
sys.exit("Please specify only one option at once.")
if args.diff and not re.match(r"^[\w\-\.\^]+\.\.[\w\-\.\^]+$", args.diff):
sys.exit("Please specify valid input in the following format: "
"\'commit1..commit2\'")
if args.commit or args.diff:
if not args.force and tree_is_dirty():
sys.exit("The current Git tree is dirty (see 'git status'). "
"Running this script may\ndelete important data since it "
"calls 'git reset --hard' for some performance\nreasons. "
" Please run this script in a clean Git tree or pass "
"'--force' if you\nwant to ignore this warning and "
"continue.")
if args.commit:
if args.commit.startswith('HEAD'):
sys.exit("The --commit option can't use the HEAD ref")
args.find = False
if args.ignore:
try:
re.match(args.ignore, "this/is/just/a/test.c")
except re.error:
sys.exit("Please specify a valid Python regex.")
return args
def main():
"""Main function of this module."""
args = parse_options()
global COLOR
COLOR = args.color and sys.stdout.isatty()
if args.sim and not args.commit and not args.diff:
sims = find_sims(args.sim, args.ignore)
if sims:
print("%s: %s" % (yel("Similar symbols"), ', '.join(sims)))
else:
print("%s: no similar symbols found" % yel("Similar symbols"))
sys.exit(0)
# dictionary of (un)defined symbols
defined = {}
undefined = {}
if args.commit or args.diff:
head = get_head()
# get commit range
commit_a = None
commit_b = None
if args.commit:
commit_a = args.commit + "~"
commit_b = args.commit
elif args.diff:
split = args.diff.split("..")
commit_a = split[0]
commit_b = split[1]
undefined_a = {}
undefined_b = {}
# get undefined items before the commit
reset(commit_a)
undefined_a, _ = check_symbols(args.ignore)
# get undefined items for the commit
reset(commit_b)
undefined_b, defined = check_symbols(args.ignore)
# report cases that are present for the commit but not before
for symbol in sorted(undefined_b):
# symbol has not been undefined before
if symbol not in undefined_a:
files = sorted(undefined_b.get(symbol))
undefined[symbol] = files
# check if there are new files that reference the undefined symbol
else:
files = sorted(undefined_b.get(symbol) -
undefined_a.get(symbol))
if files:
undefined[symbol] = files
# reset to head
reset(head)
# default to check the entire tree
else:
undefined, defined = check_symbols(args.ignore)
# now print the output
for symbol in sorted(undefined):
print(red(symbol))
files = sorted(undefined.get(symbol))
print("%s: %s" % (yel("Referencing files"), ", ".join(files)))
sims = find_sims(symbol, args.ignore, defined)
sims_out = yel("Similar symbols")
if sims:
print("%s: %s" % (sims_out, ', '.join(sims)))
else:
print("%s: %s" % (sims_out, "no similar symbols found"))
if args.find:
print("%s:" % yel("Commits changing symbol"))
commits = find_commits(symbol, args.diff)
if commits:
for commit in commits:
commit = commit.split(" ", 1)
print("\t- %s (\"%s\")" % (yel(commit[0]), commit[1]))
else:
print("\t- no commit found")
print() # new line
def reset(commit):
"""Reset current git tree to %commit."""
execute(["git", "reset", "--hard", commit])
def yel(string):
"""
Color %string yellow.
"""
return "\033[33m%s\033[0m" % string if COLOR else string
def red(string):
"""
Color %string red.
"""
return "\033[31m%s\033[0m" % string if COLOR else string
def execute(cmd):
"""Execute %cmd and return stdout. Exit in case of error."""
try:
stdout = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=False)
stdout = stdout.decode(errors='replace')
except subprocess.CalledProcessError as fail:
exit(fail)
return stdout
def find_commits(symbol, diff):
"""Find commits changing %symbol in the given range of %diff."""
commits = execute(["git", "log", "--pretty=oneline",
"--abbrev-commit", "-G",
symbol, diff])
return [x for x in commits.split("\n") if x]
def tree_is_dirty():
"""Return true if the current working tree is dirty (i.e., if any file has
been added, deleted, modified, renamed or copied but not committed)."""
stdout = execute(["git", "status", "--porcelain"])
for line in stdout.splitlines():  # iterate lines, not characters
if re.findall(r"[URMADC]{1}", line[:2]):
return True
return False
def get_head():
"""Return commit hash of current HEAD."""
stdout = execute(["git", "rev-parse", "HEAD"])
return stdout.strip('\n')
def partition(lst, size):
"""Partition list @lst into eveni-sized lists of size @size."""
return [lst[i::size] for i in range(size)]
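# e.g. partition([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]]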
def init_worker():
"""Set signal handler to ignore SIGINT."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def find_sims(symbol, ignore, defined=None):
"""Return a list of max. ten Kconfig symbols that are string-similar to
@symbol."""
if defined:
return difflib.get_close_matches(symbol, set(defined), 10)
# use a fresh list rather than a shared mutable default argument
defined = []
pool = Pool(cpu_count(), init_worker)
kfiles = []
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kfiles.append(gitfile)
arglist = []
for part in partition(kfiles, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined.extend(res[0])
return difflib.get_close_matches(symbol, set(defined), 10)
def get_files():
"""Return a list of all files in the current git directory."""
# use 'git ls-files' to get the worklist
stdout = execute(["git", "ls-files"])
if len(stdout) > 0 and stdout[-1] == "\n":
stdout = stdout[:-1]
files = []
for gitfile in stdout.rsplit("\n"):
if ".git" in gitfile or "ChangeLog" in gitfile or \
".log" in gitfile or os.path.isdir(gitfile) or \
gitfile.startswith("tools/"):
continue
files.append(gitfile)
return files
def check_symbols(ignore):
"""Find undefined Kconfig symbols and return a dict with the symbol as key
and a list of referencing files as value. Files matching %ignore are not
checked for undefined symbols."""
pool = Pool(cpu_count(), init_worker)
try:
return check_symbols_helper(pool, ignore)
except KeyboardInterrupt:
pool.terminate()
pool.join()
sys.exit(1)
def check_symbols_helper(pool, ignore):
"""Helper method for check_symbols(). Used to catch keyboard interrupts in
check_symbols() in order to properly terminate running worker processes."""
source_files = []
kconfig_files = []
defined_symbols = []
referenced_symbols = dict() # {file: [symbols]}
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kconfig_files.append(gitfile)
else:
if ignore and re.match(ignore, gitfile):
continue
# add source files that do not match the ignore pattern
source_files.append(gitfile)
# parse source files
arglist = partition(source_files, cpu_count())
for res in pool.map(parse_source_files, arglist):
referenced_symbols.update(res)
# parse kconfig files
arglist = []
for part in partition(kconfig_files, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined_symbols.extend(res[0])
referenced_symbols.update(res[1])
defined_symbols = set(defined_symbols)
# inverse mapping of referenced_symbols to dict(symbol: [files])
inv_map = dict()
for _file, symbols in referenced_symbols.items():
for symbol in symbols:
inv_map[symbol] = inv_map.get(symbol, set())
inv_map[symbol].add(_file)
referenced_symbols = inv_map
undefined = {} # {symbol: [files]}
for symbol in sorted(referenced_symbols):
# filter some false positives
if symbol == "FOO" or symbol == "BAR" or \
symbol == "FOO_BAR" or symbol == "XXX":
continue
if symbol not in defined_symbols:
if symbol.endswith("_MODULE"):
# avoid false positives for kernel modules
if symbol[:-len("_MODULE")] in defined_symbols:
continue
undefined[symbol] = referenced_symbols.get(symbol)
return undefined, defined_symbols
def parse_source_files(source_files):
"""Parse each source file in @source_files and return dictionary with source
files as keys and lists of references Kconfig symbols as values."""
referenced_symbols = dict()
for sfile in source_files:
referenced_symbols[sfile] = parse_source_file(sfile)
return referenced_symbols
def parse_source_file(sfile):
"""Parse @sfile and return a list of referenced Kconfig symbols."""
lines = []
references = []
if not os.path.exists(sfile):
return references
with open(sfile, "r", encoding='utf-8', errors='replace') as stream:
lines = stream.readlines()
for line in lines:
if "CONFIG_" not in line:
continue
symbols = REGEX_SOURCE_SYMBOL.findall(line)
for symbol in symbols:
if not REGEX_FILTER_SYMBOLS.search(symbol):
continue
references.append(symbol)
return references
def get_symbols_in_line(line):
"""Return mentioned Kconfig symbols in @line."""
return REGEX_SYMBOL.findall(line)
def parse_kconfig_files(args):
"""Parse kconfig files and return tuple of defined and references Kconfig
symbols. Note, @args is a tuple of a list of files and the @ignore
pattern."""
kconfig_files = args[0]
ignore = args[1]
defined_symbols = []
referenced_symbols = dict()
for kfile in kconfig_files:
defined, references = parse_kconfig_file(kfile)
defined_symbols.extend(defined)
if ignore and re.match(ignore, kfile):
# do not collect references for files that match the ignore pattern
continue
referenced_symbols[kfile] = references
return (defined_symbols, referenced_symbols)
def parse_kconfig_file(kfile):
"""Parse @kfile and update symbol definitions and references."""
lines = []
defined = []
references = []
if not os.path.exists(kfile):
return defined, references
with open(kfile, "r", encoding='utf-8', errors='replace') as stream:
lines = stream.readlines()
for i in range(len(lines)):
line = lines[i]
line = line.strip('\n')
line = line.split("#")[0] # ignore comments
if REGEX_KCONFIG_DEF.match(line):
symbol_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(symbol_def[0])
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
symbols = get_symbols_in_line(line)
# multi-line statements
while line.endswith("\\"):
i += 1
line = lines[i]
line = line.strip('\n')
symbols.extend(get_symbols_in_line(line))
for symbol in set(symbols):
if REGEX_NUMERIC.match(symbol):
# ignore numeric values
continue
references.append(symbol)
return defined, references
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,74 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) Google LLC, 2020
#
# Author: Nathan Huckleberry <nhuck@google.com>
#
"""A helper routine run clang-tidy and the clang static-analyzer on
compile_commands.json.
"""
import argparse
import json
import multiprocessing
import os
import subprocess
import sys
def parse_arguments():
"""Set up and parses command-line arguments.
Returns:
args: Namespace of parsed arguments
Has attributes: [path, type]
"""
usage = """Run clang-tidy or the clang static-analyzer on a
compilation database."""
parser = argparse.ArgumentParser(description=usage)
type_help = "Type of analysis to be performed"
parser.add_argument("type",
choices=["clang-tidy", "clang-analyzer"],
help=type_help)
path_help = "Path to the compilation database to parse"
parser.add_argument("path", type=str, help=path_help)
return parser.parse_args()
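# Expected command lines look like (paths are examples):
#   ./run-clang-tools.py clang-tidy compile_commands.json
#   ./run-clang-tools.py clang-analyzer compile_commands.json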
def init(l, a):
global lock
global args
lock = l
args = a
def run_analysis(entry):
# Disable all checks, then re-enable the ones we want
checks = "-checks=-*,"
if args.type == "clang-tidy":
checks += "linuxkernel-*"
else:
checks += "clang-analyzer-*"
p = subprocess.run(["clang-tidy", "-p", args.path, checks, entry["file"]],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=entry["directory"])
with lock:
sys.stderr.buffer.write(p.stdout)
def main():
args = parse_arguments()
lock = multiprocessing.Lock()
pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
# Read JSON data into the datastore variable
with open(args.path, "r") as f:
datastore = json.load(f)
pool.map(run_analysis, datastore)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,76 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) NXP 2019
import gdb
import sys
from linux import utils, lists, constants
clk_core_type = utils.CachedType("struct clk_core")
def clk_core_for_each_child(hlist_head):
return lists.hlist_for_each_entry(hlist_head,
clk_core_type.get_type().pointer(), "child_node")
class LxClkSummary(gdb.Command):
"""Print clk tree summary
Output is a subset of /sys/kernel/debug/clk/clk_summary
No calls are made during printing; instead, "(c)" is printed after values
that are cached and potentially out of date."""
def __init__(self):
super(LxClkSummary, self).__init__("lx-clk-summary", gdb.COMMAND_DATA)
def show_subtree(self, clk, level):
gdb.write("%*s%-*s %7d %8d %8d %11lu%s\n" % (
level * 3 + 1, "",
30 - level * 3,
clk['name'].string(),
clk['enable_count'],
clk['prepare_count'],
clk['protect_count'],
clk['rate'],
'(c)' if clk['flags'] & constants.LX_CLK_GET_RATE_NOCACHE else ' '))
for child in clk_core_for_each_child(clk['children']):
self.show_subtree(child, level + 1)
def invoke(self, arg, from_tty):
gdb.write(" enable prepare protect \n")
gdb.write(" clock count count count rate \n")
gdb.write("------------------------------------------------------------------------\n")
for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_root_list")):
self.show_subtree(clk, 0)
for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_orphan_list")):
self.show_subtree(clk, 0)
LxClkSummary()
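# Usage from a gdb session attached to a running kernel:
#   (gdb) lx-clk-summary
# This walks clk_root_list and clk_orphan_list and prints one row per clock
# (e.g., for a hypothetical clock: " osc_24m  1  1  0  24000000").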
class LxClkCoreLookup(gdb.Function):
"""Find struct clk_core by name"""
def __init__(self):
super(LxClkCoreLookup, self).__init__("lx_clk_core_lookup")
def lookup_hlist(self, hlist_head, name):
for child in clk_core_for_each_child(hlist_head):
if child['name'].string() == name:
return child
result = self.lookup_hlist(child['children'], name)
if result:
return result
def invoke(self, name):
name = name.string()
return (self.lookup_hlist(gdb.parse_and_eval("clk_root_list"), name) or
self.lookup_hlist(gdb.parse_and_eval("clk_orphan_list"), name))
LxClkCoreLookup()
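# Example gdb session, assuming a vmlinux with debug info is loaded
# (the clock name is illustrative):
#   (gdb) lx-clk-summary
#   (gdb) p $lx_clk_core_lookup("pll1")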

View File

@@ -0,0 +1,154 @@
#
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import sys
from linux import utils
printk_info_type = utils.CachedType("struct printk_info")
prb_data_blk_lpos_type = utils.CachedType("struct prb_data_blk_lpos")
prb_desc_type = utils.CachedType("struct prb_desc")
prb_desc_ring_type = utils.CachedType("struct prb_desc_ring")
prb_data_ring_type = utils.CachedType("struct prb_data_ring")
printk_ringbuffer_type = utils.CachedType("struct printk_ringbuffer")
atomic_long_type = utils.CachedType("atomic_long_t")
class LxDmesg(gdb.Command):
"""Print Linux kernel log buffer."""
def __init__(self):
super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
inf = gdb.inferiors()[0]
# read in prb structure
prb_addr = int(str(gdb.parse_and_eval("(void *)'printk.c'::prb")).split()[0], 16)
sz = printk_ringbuffer_type.get_type().sizeof
prb = utils.read_memoryview(inf, prb_addr, sz).tobytes()
# read in descriptor ring structure
off = printk_ringbuffer_type.get_type()['desc_ring'].bitpos // 8
addr = prb_addr + off
sz = prb_desc_ring_type.get_type().sizeof
desc_ring = utils.read_memoryview(inf, addr, sz).tobytes()
# read in descriptor array
off = prb_desc_ring_type.get_type()['count_bits'].bitpos // 8
desc_ring_count = 1 << utils.read_u32(desc_ring, off)
desc_sz = prb_desc_type.get_type().sizeof
off = prb_desc_ring_type.get_type()['descs'].bitpos // 8
addr = utils.read_ulong(desc_ring, off)
descs = utils.read_memoryview(inf, addr, desc_sz * desc_ring_count).tobytes()
# read in info array
info_sz = printk_info_type.get_type().sizeof
off = prb_desc_ring_type.get_type()['infos'].bitpos // 8
addr = utils.read_ulong(desc_ring, off)
infos = utils.read_memoryview(inf, addr, info_sz * desc_ring_count).tobytes()
# read in text data ring structure
off = printk_ringbuffer_type.get_type()['text_data_ring'].bitpos // 8
addr = prb_addr + off
sz = prb_data_ring_type.get_type().sizeof
text_data_ring = utils.read_memoryview(inf, addr, sz).tobytes()
# read in text data
off = prb_data_ring_type.get_type()['size_bits'].bitpos // 8
text_data_sz = 1 << utils.read_u32(text_data_ring, off)
off = prb_data_ring_type.get_type()['data'].bitpos // 8
addr = utils.read_ulong(text_data_ring, off)
text_data = utils.read_memoryview(inf, addr, text_data_sz).tobytes()
counter_off = atomic_long_type.get_type()['counter'].bitpos // 8
sv_off = prb_desc_type.get_type()['state_var'].bitpos // 8
off = prb_desc_type.get_type()['text_blk_lpos'].bitpos // 8
begin_off = off + (prb_data_blk_lpos_type.get_type()['begin'].bitpos // 8)
next_off = off + (prb_data_blk_lpos_type.get_type()['next'].bitpos // 8)
ts_off = printk_info_type.get_type()['ts_nsec'].bitpos // 8
len_off = printk_info_type.get_type()['text_len'].bitpos // 8
# definitions from kernel/printk/printk_ringbuffer.h
desc_committed = 1
desc_finalized = 2
desc_sv_bits = utils.get_long_type().sizeof * 8
desc_flags_shift = desc_sv_bits - 2
desc_flags_mask = 3 << desc_flags_shift
desc_id_mask = ~desc_flags_mask
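# e.g. with 64-bit longs: desc_sv_bits == 64 and desc_flags_shift == 62,
# so the two state flag bits occupy bits 62-63 and the low 62 bits carry
# the descriptor id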
# read in tail and head descriptor ids
off = prb_desc_ring_type.get_type()['tail_id'].bitpos // 8
tail_id = utils.read_u64(desc_ring, off + counter_off)
off = prb_desc_ring_type.get_type()['head_id'].bitpos // 8
head_id = utils.read_u64(desc_ring, off + counter_off)
did = tail_id
while True:
ind = did % desc_ring_count
desc_off = desc_sz * ind
info_off = info_sz * ind
# skip non-committed record
state = 3 & (utils.read_u64(descs, desc_off + sv_off +
counter_off) >> desc_flags_shift)
if state != desc_committed and state != desc_finalized:
if did == head_id:
break
did = (did + 1) & desc_id_mask
continue
begin = utils.read_ulong(descs, desc_off + begin_off) % text_data_sz
end = utils.read_ulong(descs, desc_off + next_off) % text_data_sz
# handle data-less record
if begin & 1 == 1:
text = ""
else:
# handle wrapping data block
if begin > end:
begin = 0
# skip over descriptor id
text_start = begin + utils.get_long_type().sizeof
text_len = utils.read_u16(infos, info_off + len_off)
# handle truncated message
if end - text_start < text_len:
text_len = end - text_start
text = text_data[text_start:text_start + text_len].decode(
encoding='utf8', errors='replace')
time_stamp = utils.read_u64(infos, info_off + ts_off)
for line in text.splitlines():
msg = u"[{time:12.6f}] {line}\n".format(
time=time_stamp / 1000000000.0,
line=line)
# With python2 gdb.write will attempt to convert unicode to
# ascii and might fail so pass an utf8-encoded str instead.
if sys.hexversion < 0x03000000:
msg = msg.encode(encoding='utf8', errors='replace')
gdb.write(msg)
if did == head_id:
break
did = (did + 1) & desc_id_mask
LxDmesg()

View File

@@ -0,0 +1,83 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) NXP 2019
import gdb
import sys
from linux.utils import CachedType
from linux.lists import list_for_each_entry
generic_pm_domain_type = CachedType('struct generic_pm_domain')
pm_domain_data_type = CachedType('struct pm_domain_data')
device_link_type = CachedType('struct device_link')
def kobject_get_path(kobj):
path = kobj['name'].string()
parent = kobj['parent']
if parent:
path = kobject_get_path(parent) + '/' + path
return path
def rtpm_status_str(dev):
if dev['power']['runtime_error']:
return 'error'
if dev['power']['disable_depth']:
return 'unsupported'
_RPM_STATUS_LOOKUP = [
"active",
"resuming",
"suspended",
"suspending"
]
return _RPM_STATUS_LOOKUP[dev['power']['runtime_status']]
class LxGenPDSummary(gdb.Command):
'''Print genpd summary
Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
def __init__(self):
super(LxGenPDSummary, self).__init__('lx-genpd-summary', gdb.COMMAND_DATA)
def summary_one(self, genpd):
if genpd['status'] == 0:
status_string = 'on'
else:
status_string = 'off-{}'.format(genpd['state_idx'])
child_names = []
for link in list_for_each_entry(
genpd['parent_links'],
device_link_type.get_type().pointer(),
'parent_node'):
child_names.append(link['child']['name'])
gdb.write('%-30s %-15s %s\n' % (
genpd['name'].string(),
status_string,
', '.join(child_names)))
# Print devices in domain
for pm_data in list_for_each_entry(genpd['dev_list'],
pm_domain_data_type.get_type().pointer(),
'list_node'):
dev = pm_data['dev']
kobj_path = kobject_get_path(dev['kobj'])
gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))
def invoke(self, arg, from_tty):
gdb.write('domain status children\n')
gdb.write(' /device runtime status\n')
gdb.write('----------------------------------------------------------------------\n')
for genpd in list_for_each_entry(
gdb.parse_and_eval('&gpd_list'),
generic_pm_domain_type.get_type().pointer(),
'gpd_list_node'):
self.summary_one(genpd)
LxGenPDSummary()

View File

@@ -0,0 +1,95 @@
#
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils, lists
module_type = utils.CachedType("struct module")
def module_list():
global module_type
modules = utils.gdb_eval_or_none("modules")
if modules is None:
return
module_ptr_type = module_type.get_type().pointer()
for module in lists.list_for_each_entry(modules, module_ptr_type, "list"):
yield module
def find_module_by_name(name):
for module in module_list():
if module['name'].string() == name:
return module
return None
class LxModule(gdb.Function):
"""Find module by name and return the module variable.
$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""
def __init__(self):
super(LxModule, self).__init__("lx_module")
def invoke(self, mod_name):
mod_name = mod_name.string()
module = find_module_by_name(mod_name)
if module:
return module.dereference()
else:
raise gdb.GdbError("Unable to find MODULE " + mod_name)
LxModule()
class LxLsmod(gdb.Command):
"""List currently loaded modules."""
_module_use_type = utils.CachedType("struct module_use")
def __init__(self):
super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
gdb.write(
"Address{0} Module Size Used by\n".format(
" " if utils.get_long_type().sizeof == 8 else ""))
for module in module_list():
layout = module['core_layout']
gdb.write("{address} {name:<19} {size:>8} {ref}".format(
address=str(layout['base']).split()[0],
name=module['name'].string(),
size=str(layout['size']),
ref=str(module['refcnt']['counter'] - 1)))
t = self._module_use_type.get_type().pointer()
first = True
sources = module['source_list']
for use in lists.list_for_each_entry(sources, t, "source_list"):
gdb.write("{separator}{name}".format(
separator=" " if first else ",",
name=use['source']['name'].string()))
first = False
gdb.write("\n")
LxLsmod()
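# Example gdb session (the module name is illustrative):
#   (gdb) lx-lsmod
#   (gdb) p $lx_module("e1000e")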

View File

@@ -0,0 +1,177 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright 2019 Google LLC.
import gdb
from linux import utils
rb_root_type = utils.CachedType("struct rb_root")
rb_node_type = utils.CachedType("struct rb_node")
def rb_first(root):
if root.type == rb_root_type.get_type():
root = root.address.cast(rb_root_type.get_type().pointer())
elif root.type != rb_root_type.get_type().pointer():
raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
node = root['rb_node']
if node == 0:
return None
while node['rb_left']:
node = node['rb_left']
return node
def rb_last(root):
if root.type == rb_root_type.get_type():
root = root.address.cast(rb_root_type.get_type().pointer())
elif root.type != rb_root_type.get_type().pointer():
raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
node = root['rb_node']
if node == 0:
return None
while node['rb_right']:
node = node['rb_right']
return node
def rb_parent(node):
parent = gdb.Value(node['__rb_parent_color'] & ~3)
return parent.cast(rb_node_type.get_type().pointer())
def rb_empty_node(node):
return node['__rb_parent_color'] == node.address
def rb_next(node):
if node.type == rb_node_type.get_type():
node = node.address.cast(rb_node_type.get_type().pointer())
elif node.type != rb_node_type.get_type().pointer():
raise gdb.GdbError("Must be struct rb_node not {}".format(node.type))
if rb_empty_node(node):
return None
if node['rb_right']:
node = node['rb_right']
while node['rb_left']:
node = node['rb_left']
return node
parent = rb_parent(node)
while parent and node == parent['rb_right']:
node = parent
parent = rb_parent(node)
return parent
def rb_prev(node):
if node.type == rb_node_type.get_type():
node = node.address.cast(rb_node_type.get_type().pointer())
elif node.type != rb_node_type.get_type().pointer():
raise gdb.GdbError("Must be struct rb_node not {}".format(node.type))
if rb_empty_node(node):
return None
if node['rb_left']:
node = node['rb_left']
while node['rb_right']:
node = node['rb_right']
# mirror rb_next(): return the node pointer and compare pointers directly
return node
parent = rb_parent(node)
while parent and node == parent['rb_left']:
node = parent
parent = rb_parent(node)
return parent
class LxRbFirst(gdb.Function):
"""Lookup and return a node from an RBTree
$lx_rb_first(root): Return the node at the given index.
If index is omitted, the root node is dereferenced and returned."""
def __init__(self):
super(LxRbFirst, self).__init__("lx_rb_first")
def invoke(self, root):
result = rb_first(root)
if result is None:
raise gdb.GdbError("No entry in tree")
return result
LxRbFirst()
class LxRbLast(gdb.Function):
"""Lookup and return a node from an RBTree.
$lx_rb_last(root): Return the node at the given index.
If index is omitted, the root node is dereferenced and returned."""
def __init__(self):
super(LxRbLast, self).__init__("lx_rb_last")
def invoke(self, root):
result = rb_last(root)
if result is None:
raise gdb.GdbError("No entry in tree")
return result
LxRbLast()
class LxRbNext(gdb.Function):
"""Lookup and return a node from an RBTree.
$lx_rb_next(node): Return the node at the given index.
If index is omitted, the root node is dereferenced and returned."""
def __init__(self):
super(LxRbNext, self).__init__("lx_rb_next")
def invoke(self, node):
result = rb_next(node)
if result is None:
raise gdb.GdbError("No entry in tree")
return result
LxRbNext()
class LxRbPrev(gdb.Function):
"""Lookup and return a node from an RBTree.
$lx_rb_prev(node): Return the node at the given index.
If index is omitted, the root node is dereferenced and returned."""
def __init__(self):
super(LxRbPrev, self).__init__("lx_rb_prev")
def invoke(self, node):
result = rb_prev(node)
if result is None:
raise gdb.GdbError("No entry in tree")
return result
LxRbPrev()
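# Example gdb usage, assuming some kernel variable of type struct rb_root
# (the variable name is hypothetical):
#   (gdb) p $lx_rb_first(&my_rb_root)
#   (gdb) p $lx_rb_next($)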

View File

@@ -0,0 +1,39 @@
#
# gdb helper commands and functions for Linux kernel debugging
#
# loader module
#
# Copyright (c) Siemens AG, 2012, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import os
import sys
sys.path.insert(0, os.path.dirname(__file__) + "/scripts/gdb")
try:
gdb.parse_and_eval("0")
gdb.execute("", to_string=True)
except:
gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
"work.\n")
else:
import linux.utils
import linux.symbols
import linux.modules
import linux.dmesg
import linux.tasks
import linux.config
import linux.cpus
import linux.lists
import linux.rbtree
import linux.proc
import linux.constants
import linux.timerlist
import linux.clk
import linux.genpd
import linux.device

View File

@@ -0,0 +1,128 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# show_deltas: Read list of printk messages instrumented with
# time data, and format with time deltas.
#
# Also, you can show the times relative to a fixed point.
#
# Copyright 2003 Sony Corporation
#
import sys
def usage():
print ("""usage: show_delta [<options>] <filename>
This program parses the output from a set of printk message lines which
have time data prefixed because the CONFIG_PRINTK_TIME option is set, or
the kernel command line option "time" is specified. When run with no
options, the time information is converted to show the time delta between
each printk line and the next. When run with the '-b' option, all times
are relative to a single (base) point in time.
Options:
-h Show this usage help.
-b <base> Specify a base for time references.
<base> can be a number or a string.
If it is a string, the first message line
which matches (at the beginning of the
line) is used as the time reference.
ex: $ dmesg >timefile
$ show_delta -b NET4 timefile
will show times relative to the line in the kernel output
starting with "NET4".
""")
sys.exit(1)
# returns a tuple containing the seconds and text for each message line
# seconds is returned as a float
# raise an exception if no timing data was found
def get_time(line):
if line[0]!="[":
raise ValueError
# split on closing bracket
(time_str, rest) = line[1:].split(']', 1)
time = float(time_str)
#print("time=", time)
return (time, rest)
# average line looks like:
# [ 0.084282] VFS: Mounted root (romfs filesystem) readonly
# time data is expressed in seconds.useconds,
# convert_line adds a delta for each line
last_time = 0.0
def convert_line(line, base_time):
global last_time
try:
(time, rest) = get_time(line)
except:
# if any problem parsing time, don't convert anything
return line
if base_time:
# show time from base
delta = time - base_time
else:
# just show time from last line
delta = time - last_time
last_time = time
return ("[%5.6f < %5.6f >]" % (time, delta)) + rest
def main():
base_str = ""
filein = ""
for arg in sys.argv[1:]:
if arg=="-b":
base_str = sys.argv[sys.argv.index("-b")+1]
elif arg=="-h":
usage()
else:
filein = arg
if not filein:
usage()
try:
lines = open(filein,"r").readlines()
except:
print ("Problem opening file: %s" % filein)
sys.exit(1)
if base_str:
print ('base= "%s"' % base_str)
# assume a numeric base. If that fails, try searching
# for a matching line.
try:
base_time = float(base_str)
except:
# search for line matching <base> string
found = 0
for line in lines:
try:
(time, rest) = get_time(line)
except:
continue
if rest.find(base_str) == 1:
base_time = time
found = 1
# stop at first match
break
if not found:
print ('Couldn\'t find line matching base pattern "%s"' % base_str)
sys.exit(1)
else:
base_time = 0.0
for line in lines:
print(convert_line(line, base_time), end='')
main()

View File

@@ -0,0 +1,270 @@
#!/usr/bin/env drgn
#
# Copyright (C) 2019 Tejun Heo <tj@kernel.org>
# Copyright (C) 2019 Facebook
desc = """
This is a drgn script to monitor the blk-iocost cgroup controller.
See the comment at the top of block/blk-iocost.c for more details.
For drgn, visit https://github.com/osandov/drgn.
"""
import sys
import re
import time
import json
import math
import drgn
from drgn import container_of
from drgn.helpers.linux.list import list_for_each_entry,list_empty
from drgn.helpers.linux.radixtree import radix_tree_for_each,radix_tree_lookup
import argparse
parser = argparse.ArgumentParser(description=desc,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('devname', metavar='DEV',
help='Target block device name (e.g. sda)')
parser.add_argument('--cgroup', action='append', metavar='REGEX',
help='Regex for target cgroups (may be given multiple times)')
parser.add_argument('--interval', '-i', metavar='SECONDS', type=float, default=1,
help='Monitoring interval in seconds (0 exits immediately '
'after checking requirements)')
parser.add_argument('--json', action='store_true',
help='Output in json')
args = parser.parse_args()
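# A sketch of typical invocations (script and device names are illustrative):
#   ./iocost_monitor.py sda
#   ./iocost_monitor.py sda --interval 2 --json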
def err(s):
print(s, file=sys.stderr, flush=True)
sys.exit(1)
try:
blkcg_root = prog['blkcg_root']
plid = prog['blkcg_policy_iocost'].plid.value_()
except:
err('The kernel does not have iocost enabled')
IOC_RUNNING = prog['IOC_RUNNING'].value_()
WEIGHT_ONE = prog['WEIGHT_ONE'].value_()
VTIME_PER_SEC = prog['VTIME_PER_SEC'].value_()
VTIME_PER_USEC = prog['VTIME_PER_USEC'].value_()
AUTOP_SSD_FAST = prog['AUTOP_SSD_FAST'].value_()
AUTOP_SSD_DFL = prog['AUTOP_SSD_DFL'].value_()
AUTOP_SSD_QD1 = prog['AUTOP_SSD_QD1'].value_()
AUTOP_HDD = prog['AUTOP_HDD'].value_()
autop_names = {
AUTOP_SSD_FAST: 'ssd_fast',
AUTOP_SSD_DFL: 'ssd_dfl',
AUTOP_SSD_QD1: 'ssd_qd1',
AUTOP_HDD: 'hdd',
}
class BlkgIterator:
@staticmethod
def blkcg_name(blkcg):
return blkcg.css.cgroup.kn.name.string_().decode('utf-8')
def walk(self, blkcg, q_id, parent_path):
if not self.include_dying and \
not (blkcg.css.flags.value_() & prog['CSS_ONLINE'].value_()):
return
name = BlkgIterator.blkcg_name(blkcg)
path = parent_path + '/' + name if parent_path else name
blkg = drgn.Object(prog, 'struct blkcg_gq',
address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
if not blkg.address_:
return
self.blkgs.append((path if path else '/', blkg))
for c in list_for_each_entry('struct blkcg',
blkcg.css.children.address_of_(), 'css.sibling'):
self.walk(c, q_id, path)
def __init__(self, root_blkcg, q_id, include_dying=False):
self.include_dying = include_dying
self.blkgs = []
self.walk(root_blkcg, q_id, '')
def __iter__(self):
return iter(self.blkgs)
class IocStat:
def __init__(self, ioc):
global autop_names
self.enabled = ioc.enabled.value_()
self.running = ioc.running.value_() == IOC_RUNNING
self.period_ms = ioc.period_us.value_() / 1_000
self.period_at = ioc.period_at.value_() / 1_000_000
self.vperiod_at = ioc.period_at_vtime.value_() / VTIME_PER_SEC
self.vrate_pct = ioc.vtime_base_rate.value_() * 100 / VTIME_PER_USEC
self.busy_level = ioc.busy_level.value_()
self.autop_idx = ioc.autop_idx.value_()
self.user_cost_model = ioc.user_cost_model.value_()
self.user_qos_params = ioc.user_qos_params.value_()
if self.autop_idx in autop_names:
self.autop_name = autop_names[self.autop_idx]
else:
self.autop_name = '?'
def dict(self, now):
return { 'device' : devname,
'timestamp' : now,
'enabled' : self.enabled,
'running' : self.running,
'period_ms' : self.period_ms,
'period_at' : self.period_at,
'period_vtime_at' : self.vperiod_at,
'busy_level' : self.busy_level,
'vrate_pct' : self.vrate_pct, }
def table_preamble_str(self):
state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF'
output = f'{devname} {state:4} ' \
f'per={self.period_ms}ms ' \
f'cur_per={self.period_at:.3f}:v{self.vperiod_at:.3f} ' \
f'busy={self.busy_level:+3} ' \
f'vrate={self.vrate_pct:6.2f}% ' \
f'params={self.autop_name}'
if self.user_cost_model or self.user_qos_params:
output += f'({"C" if self.user_cost_model else ""}{"Q" if self.user_qos_params else ""})'
return output
def table_header_str(self):
return f'{"":25} active {"weight":>9} {"hweight%":>13} {"inflt%":>6} ' \
f'{"debt":>7} {"delay":>7} {"usage%"}'
class IocgStat:
def __init__(self, iocg):
ioc = iocg.ioc
blkg = iocg.pd.blkg
self.is_active = not list_empty(iocg.active_list.address_of_())
self.weight = iocg.weight.value_() / WEIGHT_ONE
self.active = iocg.active.value_() / WEIGHT_ONE
self.inuse = iocg.inuse.value_() / WEIGHT_ONE
self.hwa_pct = iocg.hweight_active.value_() * 100 / WEIGHT_ONE
self.hwi_pct = iocg.hweight_inuse.value_() * 100 / WEIGHT_ONE
self.address = iocg.value_()
vdone = iocg.done_vtime.counter.value_()
vtime = iocg.vtime.counter.value_()
vrate = ioc.vtime_rate.counter.value_()
period_vtime = ioc.period_us.value_() * vrate
if period_vtime:
self.inflight_pct = (vtime - vdone) * 100 / period_vtime
else:
self.inflight_pct = 0
self.usage = (100 * iocg.usage_delta_us.value_() /
ioc.period_us.value_()) if self.active else 0
self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
if blkg.use_delay.counter.value_() != 0:
self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
else:
self.delay_ms = 0
def dict(self, now, path):
out = { 'cgroup' : path,
'timestamp' : now,
'is_active' : self.is_active,
'weight' : self.weight,
'weight_active' : self.active,
'weight_inuse' : self.inuse,
'hweight_active_pct' : self.hwa_pct,
'hweight_inuse_pct' : self.hwi_pct,
'inflight_pct' : self.inflight_pct,
'debt_ms' : self.debt_ms,
'delay_ms' : self.delay_ms,
'usage_pct' : self.usage,
'address' : self.address }
return out
def table_row_str(self, path):
out = f'{path[-28:]:28} ' \
f'{"*" if self.is_active else " "} ' \
f'{round(self.inuse):5}/{round(self.active):5} ' \
f'{self.hwi_pct:6.2f}/{self.hwa_pct:6.2f} ' \
f'{self.inflight_pct:6.2f} ' \
f'{self.debt_ms:7.2f} ' \
f'{self.delay_ms:7.2f} '\
f'{min(self.usage, 999):6.2f}'
out = out.rstrip(':')
return out
# handle args
table_fmt = not args.json
interval = args.interval
devname = args.devname
re_str = None
if args.cgroup:
for r in args.cgroup:
if re_str is None:
re_str = r
else:
re_str += '|' + r
filter_re = re.compile(re_str) if re_str else None
# Locate the roots
q_id = None
root_iocg = None
ioc = None
for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()):
blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr)
try:
if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'):
q_id = blkg.q.id.value_()
if blkg.pd[plid]:
root_iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
ioc = root_iocg.ioc
break
except:
pass
if ioc is None:
err(f'Could not find ioc for {devname}')
if interval == 0:
sys.exit(0)
# Keep printing
while True:
now = time.time()
iocstat = IocStat(ioc)
output = ''
if table_fmt:
output += '\n' + iocstat.table_preamble_str()
output += '\n' + iocstat.table_header_str()
else:
output += json.dumps(iocstat.dict(now))
for path, blkg in BlkgIterator(blkcg_root, q_id):
if filter_re and not filter_re.match(path):
continue
if not blkg.pd[plid]:
continue
iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
iocg_stat = IocgStat(iocg)
if not filter_re and not iocg_stat.is_active:
continue
if table_fmt:
output += '\n' + iocg_stat.table_row_str(path)
else:
output += '\n' + json.dumps(iocg_stat.dict(now, path))
print(output)
sys.stdout.flush()
time.sleep(interval)

View File

@@ -0,0 +1,226 @@
#!/usr/bin/env drgn
#
# Copyright (C) 2020 Roman Gushchin <guro@fb.com>
# Copyright (C) 2020 Facebook
from os import stat
import argparse
import sys
from drgn.helpers.linux import list_for_each_entry, list_empty
from drgn.helpers.linux import for_each_page
from drgn.helpers.linux.cpumask import for_each_online_cpu
from drgn.helpers.linux.percpu import per_cpu_ptr
from drgn import container_of, FaultError, Object
DESC = """
This is a drgn script to provide slab statistics for memory cgroups.
It supports cgroup v2 and v1 and can emulate the memory.kmem.slabinfo
interface of cgroup v1.
For drgn, visit https://github.com/osandov/drgn.
"""
MEMCGS = {}
OO_SHIFT = 16
OO_MASK = ((1 << OO_SHIFT) - 1)
def err(s):
print('slabinfo.py: error: %s' % s, file=sys.stderr, flush=True)
sys.exit(1)
def find_memcg_ids(css=prog['root_mem_cgroup'].css, prefix=''):
if not list_empty(css.children.address_of_()):
for css in list_for_each_entry('struct cgroup_subsys_state',
css.children.address_of_(),
'sibling'):
name = prefix + '/' + css.cgroup.kn.name.string_().decode('utf-8')
memcg = container_of(css, 'struct mem_cgroup', 'css')
MEMCGS[css.cgroup.kn.id.value_()] = memcg
find_memcg_ids(css, name)
def is_root_cache(s):
try:
return False if s.memcg_params.root_cache else True
except AttributeError:
return True
def cache_name(s):
if is_root_cache(s):
return s.name.string_().decode('utf-8')
else:
return s.memcg_params.root_cache.name.string_().decode('utf-8')
# SLUB
def oo_order(s):
return s.oo.x >> OO_SHIFT
def oo_objects(s):
return s.oo.x & OO_MASK
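# e.g. an oo.x value of 0x20020 (illustrative) encodes order 2, i.e. a
# 4-page slab, with 32 objects per slab:
#   oo_order:   0x20020 >> OO_SHIFT == 2
#   oo_objects: 0x20020 & OO_MASK   == 32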
def count_partial(n, fn):
nr_pages = 0
for page in list_for_each_entry('struct page', n.partial.address_of_(),
'lru'):
nr_pages += fn(page)
return nr_pages
def count_free(page):
return page.objects - page.inuse
def slub_get_slabinfo(s, cfg):
nr_slabs = 0
nr_objs = 0
nr_free = 0
for node in range(cfg['nr_nodes']):
n = s.node[node]
nr_slabs += n.nr_slabs.counter.value_()
nr_objs += n.total_objects.counter.value_()
nr_free += count_partial(n, count_free)
return {'active_objs': nr_objs - nr_free,
'num_objs': nr_objs,
'active_slabs': nr_slabs,
'num_slabs': nr_slabs,
'objects_per_slab': oo_objects(s),
'cache_order': oo_order(s),
'limit': 0,
'batchcount': 0,
'shared': 0,
'shared_avail': 0}
def cache_show(s, cfg, objs):
if cfg['allocator'] == 'SLUB':
sinfo = slub_get_slabinfo(s, cfg)
else:
err('SLAB isn\'t supported yet')
if cfg['shared_slab_pages']:
sinfo['active_objs'] = objs
sinfo['num_objs'] = objs
print('%-17s %6lu %6lu %6u %4u %4d'
' : tunables %4u %4u %4u'
' : slabdata %6lu %6lu %6lu' % (
cache_name(s), sinfo['active_objs'], sinfo['num_objs'],
s.size, sinfo['objects_per_slab'], 1 << sinfo['cache_order'],
sinfo['limit'], sinfo['batchcount'], sinfo['shared'],
sinfo['active_slabs'], sinfo['num_slabs'],
sinfo['shared_avail']))
def detect_kernel_config():
cfg = {}
cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
if prog.type('struct kmem_cache').members[1].name == 'flags':
cfg['allocator'] = 'SLUB'
elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
cfg['allocator'] = 'SLAB'
else:
err('Can\'t determine the slab allocator')
cfg['shared_slab_pages'] = False
try:
if prog.type('struct obj_cgroup'):
cfg['shared_slab_pages'] = True
except:
pass
return cfg
def for_each_slab_page(prog):
PGSlab = 1 << prog.constant('PG_slab')
PGHead = 1 << prog.constant('PG_head')
for page in for_each_page(prog):
try:
if page.flags.value_() & PGSlab:
yield page
except FaultError:
pass
def main():
parser = argparse.ArgumentParser(description=DESC,
formatter_class=
argparse.RawTextHelpFormatter)
parser.add_argument('cgroup', metavar='CGROUP',
help='Target memory cgroup')
args = parser.parse_args()
try:
cgroup_id = stat(args.cgroup).st_ino
find_memcg_ids()
memcg = MEMCGS[cgroup_id]
except KeyError:
err('Can\'t find the memory cgroup')
cfg = detect_kernel_config()
print('# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>'
' : tunables <limit> <batchcount> <sharedfactor>'
' : slabdata <active_slabs> <num_slabs> <sharedavail>')
if cfg['shared_slab_pages']:
obj_cgroups = set()
stats = {}
caches = {}
# find memcg pointers belonging to the specified cgroup
obj_cgroups.add(memcg.objcg.value_())
for ptr in list_for_each_entry('struct obj_cgroup',
memcg.objcg_list.address_of_(),
'list'):
obj_cgroups.add(ptr.value_())
# look over all slab pages, belonging to non-root memcgs
# and look for objects belonging to the given memory cgroup
for page in for_each_slab_page(prog):
objcg_vec_raw = page.memcg_data.value_()
if objcg_vec_raw == 0:
continue
cache = page.slab_cache
if not cache:
continue
addr = cache.value_()
caches[addr] = cache
# clear the lowest bit to get the true obj_cgroups
objcg_vec = Object(prog, 'struct obj_cgroup **',
value=objcg_vec_raw & ~1)
if addr not in stats:
stats[addr] = 0
for i in range(oo_objects(cache)):
if objcg_vec[i].value_() in obj_cgroups:
stats[addr] += 1
for addr in caches:
if stats[addr] > 0:
cache_show(caches[addr], cfg, stats[addr])
else:
for s in list_for_each_entry('struct kmem_cache',
memcg.kmem_caches.address_of_(),
'memcg_params.kmem_caches_node'):
cache_show(s, cfg, None)
main()

View File

@@ -0,0 +1,376 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Program to allow users to fuzz test Hyper-V drivers
# by interfacing with Hyper-V debugfs attributes.
# Current test methods available:
# 1. delay testing
#
# Current file/directory structure of Hyper-V debugfs:
# /sys/kernel/debug/hyperv/UUID
# /sys/kernel/debug/hyperv/UUID/<test-state filename>
# /sys/kernel/debug/hyperv/UUID/<test-method sub-directory>
#
# author: Branden Bonaby <brandonbonaby94@gmail.com>
import os
import cmd
import argparse
import glob
from argparse import RawDescriptionHelpFormatter
from argparse import RawTextHelpFormatter
from enum import Enum
# Do not change unless you change the debugfs attributes
# in /drivers/hv/debugfs.c. All fuzz testing
# attributes will start with "fuzz_test".
# debugfs path for hyperv must exist before proceeding
debugfs_hyperv_path = "/sys/kernel/debug/hyperv"
if not os.path.isdir(debugfs_hyperv_path):
print("{} doesn't exist/check permissions".format(debugfs_hyperv_path))
exit(-1)
class dev_state(Enum):
off = 0
on = 1
# File names, that correspond to the files created in
# /drivers/hv/debugfs.c
class f_names(Enum):
state_f = "fuzz_test_state"
buff_f = "fuzz_test_buffer_interrupt_delay"
mess_f = "fuzz_test_message_delay"
# Both single_actions and all_actions are used
# for error checking and to allow for some subparser
# names to be abbreviated. Do not abbreviate the
# test method names, as it will become less intuitive
# as to what the user can do. If you do decide to
# abbreviate the test method name, make sure the main
# function reflects this change.
all_actions = [
"disable_all",
"D",
"enable_all",
"view_all",
"V"
]
single_actions = [
"disable_single",
"d",
"enable_single",
"view_single",
"v"
]
def main():
file_map = recursive_file_lookup(debugfs_hyperv_path, dict())
args = parse_args()
if (not args.action):
print ("Error, no options selected...exiting")
exit(-1)
arg_set = { k for (k,v) in vars(args).items() if v and k != "action" }
arg_set.add(args.action)
path = args.path if "path" in arg_set else None
if (path and path[-1] == "/"):
path = path[:-1]
validate_args_path(path, arg_set, file_map)
if (path and "enable_single" in arg_set):
state_path = locate_state(path, file_map)
set_test_state(state_path, dev_state.on.value, args.quiet)
# Use subparsers as the key for different actions
if ("delay" in arg_set):
validate_delay_values(args.delay_time)
if (args.enable_all):
set_delay_all_devices(file_map, args.delay_time,
args.quiet)
else:
set_delay_values(path, file_map, args.delay_time,
args.quiet)
elif ("disable_all" in arg_set or "D" in arg_set):
disable_all_testing(file_map)
elif ("disable_single" in arg_set or "d" in arg_set):
disable_testing_single_device(path, file_map)
elif ("view_all" in arg_set or "V" in arg_set):
get_all_devices_test_status(file_map)
elif ("view_single" in arg_set or "v" in arg_set):
get_device_test_values(path, file_map)
# Get the state location
def locate_state(device, file_map):
return file_map[device][f_names.state_f.value]
# Validate delay values to make sure they are acceptable to
# enable delays on a device
def validate_delay_values(delay):
if (delay[0] == -1 and delay[1] == -1):
print("\nError, At least 1 value must be greater than 0")
exit(-1)
for i in delay:
if (i < -1 or i == 0 or i > 1000):
print("\nError, Values must be equal to -1 "
"or be > 0 and <= 1000")
exit(-1)
# Validate argument path
def validate_args_path(path, arg_set, file_map):
if (not path and any(element in arg_set for element in single_actions)):
print("Error, path (-p) REQUIRED for the specified option. "
"Use (-h) to check usage.")
exit(-1)
elif (path and any(item in arg_set for item in all_actions)):
print("Error, path (-p) NOT REQUIRED for the specified option. "
"Use (-h) to check usage." )
exit(-1)
elif (path not in file_map and any(item in arg_set
for item in single_actions)):
print("Error, path '{}' not a valid vmbus device".format(path))
exit(-1)
# display testing status of a single device
def get_device_test_values(path, file_map):
for name in file_map[path]:
file_location = file_map[path][name]
print( name + " = " + str(read_test_files(file_location)))
# Create a map of the vmbus devices and their associated files
# [key=device, value = [key = filename, value = file path]]
def recursive_file_lookup(path, file_map):
for f_path in glob.iglob(path + '**/*'):
if (os.path.isfile(f_path)):
if (f_path.rsplit("/",2)[0] == debugfs_hyperv_path):
directory = f_path.rsplit("/",1)[0]
else:
directory = f_path.rsplit("/",2)[0]
f_name = f_path.split("/")[-1]
if (file_map.get(directory)):
file_map[directory].update({f_name:f_path})
else:
file_map[directory] = {f_name:f_path}
elif (os.path.isdir(f_path)):
recursive_file_lookup(f_path,file_map)
return file_map
# display testing state of all devices
def get_all_devices_test_status(file_map):
for device in file_map:
if (get_test_state(locate_state(device, file_map)) == 1):
print("Testing = ON for: {}"
.format(device.split("/")[5]))
else:
print("Testing = OFF for: {}"
.format(device.split("/")[5]))
# read the vmbus device files, path must be absolute path before calling
def read_test_files(path):
try:
with open(path,"r") as f:
file_value = f.readline().strip()
return int(file_value)
except IOError as e:
errno, strerror = e.args
print("I/O error({0}): {1} on file {2}"
.format(errno, strerror, path))
exit(-1)
except ValueError:
print ("Element to int conversion error in: \n{}".format(path))
exit(-1)
# writing to vmbus device files, path must be absolute path before calling
def write_test_files(path, value):
try:
with open(path,"w") as f:
f.write("{}".format(value))
except IOError as e:
errno, strerror = e.args
print("I/O error({0}): {1} on file {2}"
.format(errno, strerror, path))
exit(-1)
# set testing state of device
def set_test_state(state_path, state_value, quiet):
write_test_files(state_path, state_value)
if (get_test_state(state_path) == 1):
if (not quiet):
print("Testing = ON for device: {}"
.format(state_path.split("/")[5]))
else:
if (not quiet):
print("Testing = OFF for device: {}"
.format(state_path.split("/")[5]))
# get testing state of device
def get_test_state(state_path):
#state == 1 - test = ON
#state == 0 - test = OFF
return read_test_files(state_path)
# write 1 - 1000 microseconds into a single device using the
# fuzz_test_buffer_interrupt_delay and fuzz_test_message_delay
# debugfs attributes
def set_delay_values(device, file_map, delay_length, quiet):
try:
interrupt = file_map[device][f_names.buff_f.value]
message = file_map[device][f_names.mess_f.value]
# delay[0]- buffer interrupt delay, delay[1]- message delay
if (delay_length[0] >= 0 and delay_length[0] <= 1000):
write_test_files(interrupt, delay_length[0])
if (delay_length[1] >= 0 and delay_length[1] <= 1000):
write_test_files(message, delay_length[1])
if (not quiet):
print("Buffer delay testing = {} for: {}"
.format(read_test_files(interrupt),
interrupt.split("/")[5]))
print("Message delay testing = {} for: {}"
.format(read_test_files(message),
message.split("/")[5]))
except IOError as e:
errno, strerror = e.args
print("I/O error({0}): {1} on files {2}{3}"
.format(errno, strerror, interrupt, message))
exit(-1)
# enabling delay testing on all devices
def set_delay_all_devices(file_map, delay, quiet):
for device in (file_map):
set_test_state(locate_state(device, file_map),
dev_state.on.value,
quiet)
set_delay_values(device, file_map, delay, quiet)
# disable all testing on a SINGLE device.
def disable_testing_single_device(device, file_map):
for name in file_map[device]:
file_location = file_map[device][name]
write_test_files(file_location, dev_state.off.value)
print("ALL testing now OFF for {}".format(device.split("/")[-1]))
# disable all testing on ALL devices
def disable_all_testing(file_map):
for device in file_map:
disable_testing_single_device(device, file_map)
def parse_args():
parser = argparse.ArgumentParser(prog = "vmbus_testing",usage ="\n"
"%(prog)s [delay] [-h] [-e|-E] -t [-p]\n"
"%(prog)s [view_all | V] [-h]\n"
"%(prog)s [disable_all | D] [-h]\n"
"%(prog)s [disable_single | d] [-h|-p]\n"
"%(prog)s [view_single | v] [-h|-p]\n"
"%(prog)s --version\n",
description = "\nUse lsvmbus to get vmbus device type "
"information.\n" "\nThe debugfs root path is "
"/sys/kernel/debug/hyperv",
formatter_class = RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest = "action")
parser.add_argument("--version", action = "version",
version = '%(prog)s 0.1.0')
parser.add_argument("-q","--quiet", action = "store_true",
help = "silence none important test messages."
" This will only work when enabling testing"
" on a device.")
# Use the path parser to hold the --path attribute so it can
# be shared between subparsers. Also do the same for the state
# parser, as all testing methods will use --enable_all and
# enable_single.
path_parser = argparse.ArgumentParser(add_help=False)
path_parser.add_argument("-p","--path", metavar = "",
help = "Debugfs path to a vmbus device. The path "
"must be the absolute path to the device.")
state_parser = argparse.ArgumentParser(add_help=False)
state_group = state_parser.add_mutually_exclusive_group(required = True)
state_group.add_argument("-E", "--enable_all", action = "store_const",
const = "enable_all",
help = "Enable the specified test type "
"on ALL vmbus devices.")
state_group.add_argument("-e", "--enable_single",
action = "store_const",
const = "enable_single",
help = "Enable the specified test type on a "
"SINGLE vmbus device.")
parser_delay = subparsers.add_parser("delay",
parents = [state_parser, path_parser],
help = "Delay the ring buffer interrupt or the "
"ring buffer message reads in microseconds.",
prog = "vmbus_testing",
usage = "%(prog)s [-h]\n"
"%(prog)s -E -t [value] [value]\n"
"%(prog)s -e -t [value] [value] -p",
description = "Delay the ring buffer interrupt for "
"vmbus devices, or delay the ring buffer message "
"reads for vmbus devices (both in microseconds). This "
"is only on the host to guest channel.")
parser_delay.add_argument("-t", "--delay_time", metavar = "", nargs = 2,
type = check_range, default =[0,0], required = (True),
help = "Set [buffer] & [message] delay time. "
"Value constraints: -1 == value "
"or 0 < value <= 1000.\n"
"Use -1 to keep the previous value for that delay "
"type, or a value > 0 <= 1000 to change the delay "
"time.")
parser_dis_all = subparsers.add_parser("disable_all",
aliases = ['D'], prog = "vmbus_testing",
usage = "%(prog)s [disable_all | D] -h\n"
"%(prog)s [disable_all | D]\n",
help = "Disable ALL testing on ALL vmbus devices.",
description = "Disable ALL testing on ALL vmbus "
"devices.")
parser_dis_single = subparsers.add_parser("disable_single",
aliases = ['d'],
parents = [path_parser], prog = "vmbus_testing",
usage = "%(prog)s [disable_single | d] -h\n"
"%(prog)s [disable_single | d] -p\n",
help = "Disable ALL testing on a SINGLE vmbus device.",
description = "Disable ALL testing on a SINGLE vmbus "
"device.")
parser_view_all = subparsers.add_parser("view_all", aliases = ['V'],
help = "View the test state for ALL vmbus devices.",
prog = "vmbus_testing",
usage = "%(prog)s [view_all | V] -h\n"
"%(prog)s [view_all | V]\n",
description = "This shows the test state for ALL the "
"vmbus devices.")
parser_view_single = subparsers.add_parser("view_single",
aliases = ['v'],parents = [path_parser],
help = "View the test values for a SINGLE vmbus "
"device.",
description = "This shows the test values for a SINGLE "
"vmbus device.", prog = "vmbus_testing",
usage = "%(prog)s [view_single | v] -h\n"
"%(prog)s [view_single | v] -p")
return parser.parse_args()
# value checking for range checking input in parser
def check_range(arg1):
try:
val = int(arg1)
except ValueError as err:
raise argparse.ArgumentTypeError(str(err))
if val < -1 or val > 1000:
message = ("\n\nvalue must be -1 or 0 < value <= 1000. "
"Value program received: {}\n").format(val)
raise argparse.ArgumentTypeError(message)
return val
if __name__ == "__main__":
main()
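# Example runs (the script name and device path are illustrative):
#   ./vmbus_testing view_all
#   ./vmbus_testing delay -e -t 100 500 -p /sys/kernel/debug/hyperv/<UUID>
#   ./vmbus_testing disable_all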

File diff suppressed because it is too large

View File

@@ -0,0 +1,97 @@
# EventClass.py
# SPDX-License-Identifier: GPL-2.0
#
# This is a library defining some event type classes, which can
# be used by other scripts to analyze perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
# event classes based on their requirements.
from __future__ import print_function
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type directly, so
# we infer it from the raw buffer size: a raw PEBS event with load latency
# data is 176 bytes, while a plain PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
(self.name, self.symbol, self.comm, self.dso))
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
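# A minimal sketch of how a perf script handler could consume these classes
# (the param_dict fields follow perf's python scripting convention, as used
# by event_analyzing_sample.py later in this commit):
#
#   def process_event(param_dict):
#       event = create_event(param_dict["ev_name"], param_dict["comm"],
#                            param_dict.get("dso", "Unknown_dso"),
#                            param_dict.get("symbol", "Unknown_symbol"),
#                            param_dict["raw_buf"])
#       event.show()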

View File

@@ -0,0 +1,91 @@
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from __future__ import print_function
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
# note: a trailing comma here would turn the result into a 1-tuple
s = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
return s
def add_stats(dict, key, value):
if key not in dict:
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
# keep a true running mean instead of repeatedly halving
avg = (avg * count + value) / (count + 1)
dict[key] = (min, max, avg, count + 1)
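# e.g. add_stats(latencies, "sys_read", delta) keeps a running
# (min, max, avg, count) tuple per key; the names here are hypothetical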
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
'ppc64le' : audit.MACH_PPC64LE,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print("Install the audit-libs-python package to get syscall names.\n"
"For example:\n # apt-get install python-audit (Ubuntu)"
"\n # yum install audit-libs-python (Fedora)"
"\n etc.\n")
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr

View File

@@ -0,0 +1,84 @@
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
from __future__ import print_function
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print("trace_begin")
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print("call_site=%u, ptr=%u, bytes_req=%u, "
"bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print("%-20s %5u %05u.%09u %8u %-20s " %
(event_name, cpu, secs, nsecs, pid, comm),
end=' ')
# print trace fields not included in handler args
def print_uncommon(context):
print("common_preempt_count=%d, common_flags=%s, "
"common_lock_depth=%d, " %
(common_pc(context), trace_flag_str(common_flags(context)),
common_lock_depth(context)))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print("\nunhandled events:\n")
print("%-40s %10s" % ("event", "count"))
print("%-40s %10s" % ("----------------------------------------",
"-----------"))
for event_name in keys:
print("%-40s %10d\n" % (event_name, unhandled[event_name]))

View File

@@ -0,0 +1,311 @@
# report time spent in compaction
# Licensed under the terms of the GNU GPL License version 2
# testing:
# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
import os
import sys
import re
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
class popt:
DISP_DFL = 0
DISP_PROC = 1
DISP_PROC_VERBOSE=2
class topt:
DISP_TIME = 0
DISP_MIG = 1
DISP_ISOLFREE = 2
DISP_ISOLMIG = 4
DISP_ALL = 7
class comm_filter:
def __init__(self, re):
self.re = re
def filter(self, pid, comm):
m = self.re.search(comm)
return m == None or m.group() == ""
class pid_filter:
def __init__(self, low, high):
self.low = (0 if low == "" else int(low))
self.high = (0 if high == "" else int(high))
def filter(self, pid, comm):
return not (pid >= self.low and (self.high == 0 or pid <= self.high))
def set_type(t):
global opt_disp
opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
def ns(sec, nsec):
return (sec * 1000000000) + nsec
def time(ns):
return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
class pair:
def __init__(self, aval, bval, alabel = None, blabel = None):
self.alabel = alabel
self.blabel = blabel
self.aval = aval
self.bval = bval
def __add__(self, rhs):
self.aval += rhs.aval
self.bval += rhs.bval
return self
def __str__(self):
return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
class cnode:
def __init__(self, ns):
self.ns = ns
self.migrated = pair(0, 0, "moved", "failed")
self.fscan = pair(0,0, "scanned", "isolated")
self.mscan = pair(0,0, "scanned", "isolated")
def __add__(self, rhs):
self.ns += rhs.ns
self.migrated += rhs.migrated
self.fscan += rhs.fscan
self.mscan += rhs.mscan
return self
def __str__(self):
prev = 0
s = "%s " % time(self.ns)
if (opt_disp & topt.DISP_MIG):
s += "migration: %s" % self.migrated
prev = 1
if (opt_disp & topt.DISP_ISOLFREE):
s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
prev = 1
if (opt_disp & topt.DISP_ISOLMIG):
s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
return s
def complete(self, secs, nsecs):
self.ns = ns(secs, nsecs) - self.ns
def increment(self, migrated, fscan, mscan):
if (migrated != None):
self.migrated += migrated
if (fscan != None):
self.fscan += fscan
if (mscan != None):
self.mscan += mscan
class chead:
heads = {}
val = cnode(0)
fobj = None
@classmethod
def add_filter(cls, filter):
cls.fobj = filter
@classmethod
def create_pending(cls, pid, comm, start_secs, start_nsecs):
filtered = 0
try:
head = cls.heads[pid]
filtered = head.is_filtered()
except KeyError:
if cls.fobj != None:
filtered = cls.fobj.filter(pid, comm)
head = cls.heads[pid] = chead(comm, pid, filtered)
if not filtered:
head.mark_pending(start_secs, start_nsecs)
@classmethod
def increment_pending(cls, pid, migrated, fscan, mscan):
head = cls.heads[pid]
if not head.is_filtered():
if head.is_pending():
head.do_increment(migrated, fscan, mscan)
else:
sys.stderr.write("missing start compaction event for pid %d\n" % pid)
@classmethod
def complete_pending(cls, pid, secs, nsecs):
head = cls.heads[pid]
if not head.is_filtered():
if head.is_pending():
head.make_complete(secs, nsecs)
else:
sys.stderr.write("missing start compaction event for pid %d\n" % pid)
@classmethod
def gen(cls):
if opt_proc != popt.DISP_DFL:
for i in cls.heads:
yield cls.heads[i]
@classmethod
def str(cls):
return cls.val
def __init__(self, comm, pid, filtered):
self.comm = comm
self.pid = pid
self.val = cnode(0)
self.pending = None
self.filtered = filtered
self.list = []
def __add__(self, rhs):
self.ns += rhs.ns
self.val += rhs.val
return self
def mark_pending(self, secs, nsecs):
self.pending = cnode(ns(secs, nsecs))
def do_increment(self, migrated, fscan, mscan):
self.pending.increment(migrated, fscan, mscan)
def make_complete(self, secs, nsecs):
self.pending.complete(secs, nsecs)
chead.val += self.pending
if opt_proc != popt.DISP_DFL:
self.val += self.pending
if opt_proc == popt.DISP_PROC_VERBOSE:
self.list.append(self.pending)
self.pending = None
def enumerate(self):
if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
for i, pelem in enumerate(self.list):
sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
def is_pending(self):
return self.pending != None
def is_filtered(self):
return self.filtered
def display(self):
if not self.is_filtered():
sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
def trace_end():
sys.stdout.write("total: %s\n" % chead.str())
for i in chead.gen():
i.display()
i.enumerate()
def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, nr_migrated, nr_failed):
chead.increment_pending(common_pid,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
def compaction__mm_compaction_end(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, zone_start, migrate_start, free_start, zone_end,
sync, status):
chead.complete_pending(common_pid, common_secs, common_nsecs)
def compaction__mm_compaction_begin(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, zone_start, migrate_start, free_start, zone_end,
sync):
chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
def pr_help():
global usage
sys.stdout.write(usage)
sys.stdout.write("\n")
sys.stdout.write("-h display this help\n")
sys.stdout.write("-p display by process\n")
sys.stdout.write("-pv display by process (verbose)\n")
sys.stdout.write("-t display stall times only\n")
sys.stdout.write("-m display stats for migration\n")
sys.stdout.write("-fs display stats for free scanner\n")
sys.stdout.write("-ms display stats for migration scanner\n")
sys.stdout.write("-u display results in microseconds (default nanoseconds)\n")
comm_re = None
pid_re = None
pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
opt_proc = popt.DISP_DFL
opt_disp = topt.DISP_ALL
opt_ns = True
argc = len(sys.argv) - 1
if argc >= 1:
pid_re = re.compile(pid_regex)
for i, opt in enumerate(sys.argv[1:]):
if opt[0] == "-":
if opt == "-h":
pr_help()
exit(0)
elif opt == "-p":
opt_proc = popt.DISP_PROC
elif opt == "-pv":
opt_proc = popt.DISP_PROC_VERBOSE
elif opt == '-u':
opt_ns = False
elif opt == "-t":
set_type(topt.DISP_TIME)
elif opt == "-m":
set_type(topt.DISP_MIG)
elif opt == "-fs":
set_type(topt.DISP_ISOLFREE)
elif opt == "-ms":
set_type(topt.DISP_ISOLMIG)
else:
sys.exit(usage)
elif i == argc - 1:
m = pid_re.search(opt)
if m != None and m.group() != "":
if m.group(3) != None:
f = pid_filter(m.group(3), m.group(3))
else:
f = pid_filter(m.group(1), m.group(2))
else:
try:
comm_re=re.compile(opt)
except:
sys.stderr.write("invalid regex '%s'" % opt)
sys.exit(usage)
f = comm_filter(comm_re)
chead.add_filter(f)
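# Example run, assuming compaction tracepoints were recorded first
# (the record command is illustrative):
#   perf record -e 'compaction:*' -a -- sleep 10
#   perf script report compaction-times.py -- -p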

View File

@@ -0,0 +1,192 @@
# event_analyzing_sample.py: general event handler in python
# SPDX-License-Identifier: GPL-2.0
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report; rather, it
# gives end users/developers a flexible way to analyze events other than
# trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
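# An illustrative invocation (the record command here is an assumption,
# not part of the original script):
#
#   $ perf record -a -e cycles sleep 1
#   $ perf script -s event_analyzing_sample.py
#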
from __future__ import print_function
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data file contains a large number of samples, the insert
# operations become very time consuming (about 10+ minutes for 10000
# samples) when the .db database is on disk. Move the .db file to a
# RAM-based filesystem to speed up the handling and cut the time down to
# several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
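# A minimal sketch (defined here but not invoked) of choosing a RAM-backed
# location when one is available; the helper name and the fallback to the
# current directory are illustrative assumptions, not part of the original.
def pick_db_path(name="perf.db"):
	ramfs = "/dev/shm"
	if os.path.isdir(ramfs) and os.access(ramfs, os.W_OK):
		# RAM-based filesystem: inserts run in seconds
		return os.path.join(ramfs, name)
	# fall back to a plain on-disk file in the current directory
	return name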
def trace_begin():
print("In trace_begin:\n")
#
	# Create several tables at the start: pebs_ll is for PEBS data with
	# load latency info, while gen_events is for general events.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create an event object and insert it into the right table in the
# database so that users can do more analysis with simple database
# commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print("In trace_end:\n")
	# Show the basic info for the two types of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event count may be very large, a linear histogram of the raw
# numbers is impractical, so a log2 scale is used instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
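# For example, num2sym(1) yields a single '#', and each doubling of num adds
# roughly one more '#', so column widths grow logarithmically with the count.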
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return
print("Statistics about the general events grouped by thread/symbol/dso: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dso
print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return
print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
def trace_unhandled(event_name, context, event_fields_dict):
print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))

File diff suppressed because it is too large

View File

@@ -0,0 +1,796 @@
# export-to-sqlite.py: export perf data to a sqlite3 database
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
from __future__ import print_function
import os
import sys
import struct
import datetime
# To use this script you will need to have installed package python-pyside which
# provides LGPL-licensed Python bindings for Qt. You will also need the package
# libqt4-sql-sqlite for Qt sqlite3 support.
#
# Examples of installing pyside:
#
# ubuntu:
#
# $ sudo apt-get install python-pyside.qtsql libqt4-sql-sqlite
#
# Alternately, to use Python3 and/or pyside 2, one of the following:
#
# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-sqlite
# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-sqlite
# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-sqlite
# fedora:
#
# $ sudo yum install python-pyside
#
# Alternately, to use Python3 and/or pyside 2, one of the following:
# $ sudo yum install python3-pyside
# $ pip install --user PySide2
# $ pip3 install --user PySide2
#
# An example of using this script with Intel PT:
#
# $ perf record -e intel_pt//u ls
# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
# 2017-07-31 14:26:07.326913 Creating database...
# 2017-07-31 14:26:07.538097 Writing records...
# 2017-07-31 14:26:09.889292 Adding indexes
# 2017-07-31 14:26:09.958746 Done
#
# To browse the database, sqlite3 can be used e.g.
#
# $ sqlite3 pt_example
# sqlite> .header on
# sqlite> select * from samples_view where id < 10;
# sqlite> .mode column
# sqlite> select * from samples_view where id < 10;
# sqlite> .tables
# sqlite> .schema samples_view
# sqlite> .quit
#
# An example of using the database is provided by the script
# exported-sql-viewer.py. Refer to that script for details.
#
# The database structure is practically the same as created by the script
# export-to-postgresql.py. Refer to that script for details. A notable
# difference is the 'transaction' column of the 'samples' table which is
# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
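# For example (illustrative only), when browsing the database directly the
# renamed column must be used:
#
#   sqlite> SELECT id, transaction_ FROM samples LIMIT 10;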
pyside_version_1 = True
if not "pyside-version-1" in sys.argv:
try:
from PySide2.QtSql import *
pyside_version_1 = False
except:
pass
if pyside_version_1:
from PySide.QtSql import *
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
def printerr(*args, **keyword_args):
print(*args, file=sys.stderr, **keyword_args)
def printdate(*args, **kw_args):
print(datetime.datetime.today(), *args, sep=' ', **kw_args)
def usage():
printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]");
printerr("where: columns 'all' or 'branches'");
printerr(" calls 'calls' => create calls and call_paths table");
printerr(" callchains 'callchains' => create call_paths table");
printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1");
raise Exception("Too few or bad arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
for i in range(3,len(sys.argv)):
if (sys.argv[i] == "calls"):
perf_db_export_calls = True
elif (sys.argv[i] == "callchains"):
perf_db_export_callchains = True
elif (sys.argv[i] == "pyside-version-1"):
pass
else:
usage()
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
def do_query_(q):
if (q.exec_()):
return
raise Exception("Query failed: " + q.lastError().text())
printdate("Creating database ...")
db_exists = False
try:
f = open(dbname)
f.close()
db_exists = True
except:
pass
if db_exists:
raise Exception(dbname + " already exists")
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'PRAGMA journal_mode = OFF')
do_query(query, 'BEGIN TRANSACTION')
do_query(query, 'CREATE TABLE selected_events ('
'id integer NOT NULL PRIMARY KEY,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id integer NOT NULL PRIMARY KEY,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id integer NOT NULL PRIMARY KEY,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id integer NOT NULL PRIMARY KEY,'
'comm varchar(16),'
'c_thread_id bigint,'
'c_time bigint,'
'exec_flag boolean)')
do_query(query, 'CREATE TABLE comm_threads ('
'id integer NOT NULL PRIMARY KEY,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id integer NOT NULL PRIMARY KEY,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id integer NOT NULL PRIMARY KEY,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL PRIMARY KEY,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id integer NOT NULL PRIMARY KEY,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
else:
do_query(query, 'CREATE TABLE samples ('
'id integer NOT NULL PRIMARY KEY,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction_ bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
'id integer NOT NULL PRIMARY KEY,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE calls ('
'id integer NOT NULL PRIMARY KEY,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer,'
'parent_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
do_query(query, 'CREATE TABLE ptwrite ('
'id integer NOT NULL PRIMARY KEY,'
'payload bigint,'
'exact_ip integer)')
do_query(query, 'CREATE TABLE cbr ('
'id integer NOT NULL PRIMARY KEY,'
'cbr integer,'
'mhz integer,'
'percent integer)')
do_query(query, 'CREATE TABLE mwait ('
'id integer NOT NULL PRIMARY KEY,'
'hints integer,'
'extensions integer)')
do_query(query, 'CREATE TABLE pwre ('
'id integer NOT NULL PRIMARY KEY,'
'cstate integer,'
'subcstate integer,'
'hw integer)')
do_query(query, 'CREATE TABLE exstop ('
'id integer NOT NULL PRIMARY KEY,'
'exact_ip integer)')
do_query(query, 'CREATE TABLE pwrx ('
'id integer NOT NULL PRIMARY KEY,'
'deepest_cstate integer,'
'last_cstate integer,'
'wake_reason integer)')
do_query(query, 'CREATE TABLE context_switches ('
'id integer NOT NULL PRIMARY KEY,'
'machine_id bigint,'
'time bigint,'
'cpu integer,'
'thread_out_id bigint,'
'comm_out_id bigint,'
'thread_in_id bigint,'
'comm_in_id bigint,'
'flags integer)')
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
try:
do_query(query, 'SELECT printf("") FROM machines')
sqlite_has_printf = True
except:
pass
def emit_to_hex(x):
if sqlite_has_printf:
return 'printf("%x", ' + x + ')'
else:
return x
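# For illustration: emit_to_hex('ip') expands to 'printf("%x", ip)' when
# sqlite supports printf, and falls back to plain 'ip' otherwise.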
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
'id,'
'pid,'
'root_dir,'
'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
' FROM machines')
do_query(query, 'CREATE VIEW dsos_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'short_name,'
'long_name,'
'build_id'
' FROM dsos')
do_query(query, 'CREATE VIEW symbols_view AS '
'SELECT '
'id,'
'name,'
'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
'dso_id,'
'sym_start,'
'sym_end,'
'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
' FROM symbols')
do_query(query, 'CREATE VIEW threads_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'process_id,'
'pid,'
'tid'
' FROM threads')
do_query(query, 'CREATE VIEW comm_threads_view AS '
'SELECT '
'comm_id,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
' FROM comm_threads')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE VIEW call_paths_view AS '
'SELECT '
'c.id,'
+ emit_to_hex('c.ip') + ' AS ip,'
'c.symbol_id,'
'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
'(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
'c.parent_id,'
+ emit_to_hex('p.ip') + ' AS parent_ip,'
'p.symbol_id AS parent_symbol_id,'
'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
'(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
if perf_db_export_calls:
do_query(query, 'CREATE VIEW calls_view AS '
'SELECT '
'calls.id,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'call_path_id,'
+ emit_to_hex('ip') + ' AS ip,'
'symbol_id,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'call_time,'
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,'
'call_id,'
'return_id,'
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
'parent_call_path_id,'
'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
+ emit_to_hex('ip') + ' AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
+ emit_to_hex('to_ip') + ' AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC'
' FROM samples')
do_query(query, 'CREATE VIEW ptwrite_view AS '
'SELECT '
'ptwrite.id,'
'time,'
'cpu,'
+ emit_to_hex('payload') + ' AS payload_hex,'
'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
' FROM ptwrite'
' INNER JOIN samples ON samples.id = ptwrite.id')
do_query(query, 'CREATE VIEW cbr_view AS '
'SELECT '
'cbr.id,'
'time,'
'cpu,'
'cbr,'
'mhz,'
'percent'
' FROM cbr'
' INNER JOIN samples ON samples.id = cbr.id')
do_query(query, 'CREATE VIEW mwait_view AS '
'SELECT '
'mwait.id,'
'time,'
'cpu,'
+ emit_to_hex('hints') + ' AS hints_hex,'
+ emit_to_hex('extensions') + ' AS extensions_hex'
' FROM mwait'
' INNER JOIN samples ON samples.id = mwait.id')
do_query(query, 'CREATE VIEW pwre_view AS '
'SELECT '
'pwre.id,'
'time,'
'cpu,'
'cstate,'
'subcstate,'
'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw'
' FROM pwre'
' INNER JOIN samples ON samples.id = pwre.id')
do_query(query, 'CREATE VIEW exstop_view AS '
'SELECT '
'exstop.id,'
'time,'
'cpu,'
'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
' FROM exstop'
' INNER JOIN samples ON samples.id = exstop.id')
do_query(query, 'CREATE VIEW pwrx_view AS '
'SELECT '
'pwrx.id,'
'time,'
'cpu,'
'deepest_cstate,'
'last_cstate,'
'CASE WHEN wake_reason=1 THEN \'Interrupt\''
' WHEN wake_reason=2 THEN \'Timer Deadline\''
' WHEN wake_reason=4 THEN \'Monitored Address\''
' WHEN wake_reason=8 THEN \'HW\''
' ELSE wake_reason '
'END AS wake_reason'
' FROM pwrx'
' INNER JOIN samples ON samples.id = pwrx.id')
do_query(query, 'CREATE VIEW power_events_view AS '
'SELECT '
'samples.id,'
'time,'
'cpu,'
'selected_events.name AS event,'
'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,'
'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,'
'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,'
'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,'
'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,'
'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,'
'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,'
'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,'
'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,'
'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,'
'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,'
'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT '
'CASE WHEN wake_reason=1 THEN \'Interrupt\''
' WHEN wake_reason=2 THEN \'Timer Deadline\''
' WHEN wake_reason=4 THEN \'Monitored Address\''
' WHEN wake_reason=8 THEN \'HW\''
' ELSE wake_reason '
'END'
' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason'
' FROM samples'
' INNER JOIN selected_events ON selected_events.id = evsel_id'
' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')')
do_query(query, 'CREATE VIEW context_switches_view AS '
'SELECT '
'context_switches.id,'
'context_switches.machine_id,'
'context_switches.time,'
'context_switches.cpu,'
'th_out.pid AS pid_out,'
'th_out.tid AS tid_out,'
'comm_out.comm AS comm_out,'
'th_in.pid AS pid_in,'
'th_in.tid AS tid_in,'
'comm_in.comm AS comm_in,'
'CASE WHEN context_switches.flags = 0 THEN \'in\''
' WHEN context_switches.flags = 1 THEN \'out\''
' WHEN context_switches.flags = 3 THEN \'out preempt\''
' ELSE context_switches.flags '
'END AS flags'
' FROM context_switches'
' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id'
' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id'
' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id'
' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id')
do_query(query, 'END TRANSACTION')
evsel_query = QSqlQuery(db)
evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)")
machine_query = QSqlQuery(db)
machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)")
thread_query = QSqlQuery(db)
thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)")
comm_query = QSqlQuery(db)
comm_query.prepare("INSERT INTO comms VALUES (?, ?, ?, ?, ?)")
comm_thread_query = QSqlQuery(db)
comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)")
dso_query = QSqlQuery(db)
dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)")
symbol_query = QSqlQuery(db)
symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)")
branch_type_query = QSqlQuery(db)
branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
sample_query = QSqlQuery(db)
if branches:
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
else:
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
if perf_db_export_calls or perf_db_export_callchains:
call_path_query = QSqlQuery(db)
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
call_query = QSqlQuery(db)
call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
ptwrite_query = QSqlQuery(db)
ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)")
cbr_query = QSqlQuery(db)
cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)")
mwait_query = QSqlQuery(db)
mwait_query.prepare("INSERT INTO mwait VALUES (?, ?, ?)")
pwre_query = QSqlQuery(db)
pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)")
exstop_query = QSqlQuery(db)
exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)")
pwrx_query = QSqlQuery(db)
pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)")
context_switch_query = QSqlQuery(db)
context_switch_query.prepare("INSERT INTO context_switches VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)")
def trace_begin():
printdate("Writing records...")
do_query(query, 'BEGIN TRANSACTION')
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown", 0, 0, 0)
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def is_table_empty(table_name):
do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1');
if query.next():
return False
return True
def drop(table_name):
do_query(query, 'DROP VIEW ' + table_name + '_view');
do_query(query, 'DROP TABLE ' + table_name);
def trace_end():
do_query(query, 'END TRANSACTION')
printdate("Adding indexes")
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
do_query(query, 'ALTER TABLE comms ADD has_calls boolean')
do_query(query, 'UPDATE comms SET has_calls = 1 WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)')
printdate("Dropping unused tables")
if is_table_empty("ptwrite"):
drop("ptwrite")
if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
do_query(query, 'DROP VIEW power_events_view');
drop("mwait")
drop("pwre")
drop("exstop")
drop("pwrx")
if is_table_empty("cbr"):
drop("cbr")
if is_table_empty("context_switches"):
drop("context_switches")
if (unhandled_count):
printdate("Warning: ", unhandled_count, " unhandled events")
printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
def bind_exec(q, n, x):
for xx in x[0:n]:
q.addBindValue(str(xx))
do_query_(q)
def evsel_table(*x):
bind_exec(evsel_query, 2, x)
def machine_table(*x):
bind_exec(machine_query, 3, x)
def thread_table(*x):
bind_exec(thread_query, 5, x)
def comm_table(*x):
bind_exec(comm_query, 5, x)
def comm_thread_table(*x):
bind_exec(comm_thread_query, 3, x)
def dso_table(*x):
bind_exec(dso_query, 5, x)
def symbol_table(*x):
bind_exec(symbol_query, 6, x)
def branch_type_table(*x):
bind_exec(branch_type_query, 2, x)
def sample_table(*x):
if branches:
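		# The branches samples table omits the period, weight,
		# transaction_ and data_src columns, so skip x[15:19] here.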
for xx in x[0:15]:
sample_query.addBindValue(str(xx))
for xx in x[19:24]:
sample_query.addBindValue(str(xx))
do_query_(sample_query)
else:
bind_exec(sample_query, 24, x)
def call_path_table(*x):
bind_exec(call_path_query, 4, x)
def call_return_table(*x):
bind_exec(call_query, 14, x)
def ptwrite(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
ptwrite_query.addBindValue(str(id))
ptwrite_query.addBindValue(str(payload))
ptwrite_query.addBindValue(str(exact_ip))
do_query_(ptwrite_query)
def cbr(id, raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
	# integer division keeps the stored values integral under Python 3
	MHz = (data[4] + 500) // 1000
	percent = ((cbr * 1000 // data[2]) + 5) // 10
cbr_query.addBindValue(str(id))
cbr_query.addBindValue(str(cbr))
cbr_query.addBindValue(str(MHz))
cbr_query.addBindValue(str(percent))
do_query_(cbr_query)
def mwait(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
mwait_query.addBindValue(str(id))
mwait_query.addBindValue(str(hints))
mwait_query.addBindValue(str(extensions))
do_query_(mwait_query)
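# For example (made-up payload, for illustration only): a payload of
# 0x300000001 gives hints 0x1 (low byte) and extensions 0x3 (bits 32-33).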
def pwre(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
pwre_query.addBindValue(str(id))
pwre_query.addBindValue(str(cstate))
pwre_query.addBindValue(str(subcstate))
pwre_query.addBindValue(str(hw))
do_query_(pwre_query)
def exstop(id, raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
exstop_query.addBindValue(str(id))
exstop_query.addBindValue(str(exact_ip))
do_query_(exstop_query)
def pwrx(id, raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
pwrx_query.addBindValue(str(id))
pwrx_query.addBindValue(str(deepest_cstate))
pwrx_query.addBindValue(str(last_cstate))
pwrx_query.addBindValue(str(wake_reason))
do_query_(pwrx_query)
def synth_data(id, config, raw_buf, *x):
if config == 0:
ptwrite(id, raw_buf)
elif config == 1:
mwait(id, raw_buf)
elif config == 2:
pwre(id, raw_buf)
elif config == 3:
exstop(id, raw_buf)
elif config == 4:
pwrx(id, raw_buf)
elif config == 5:
cbr(id, raw_buf)
def context_switch_table(*x):
bind_exec(context_switch_query, 9, x)

File diff suppressed because it is too large

View File

@@ -0,0 +1,79 @@
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
from __future__ import print_function
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
	except ValueError:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print("Press control+C to stop and show the summary")
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
raw_syscalls__sys_exit(**locals())
def print_error_totals():
if for_comm is not None:
print("\nsyscall errors for %s:\n" % (for_comm))
else:
print("\nsyscall errors:\n")
print("%-30s %10s" % ("comm [pid]", "count"))
print("%-30s %10s" % ("------------------------------", "----------"))
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print("\n%s [%d]" % (comm, pid))
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print(" syscall: %-16s" % syscall_name(id))
for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
print(" err = %-20s %10d" % (strerror(ret), val))

View File

@@ -0,0 +1,355 @@
# SPDX-License-Identifier: GPL-2.0
# intel-pt-events.py: Print Intel PT Events including Power Events and PTWRITE
# Copyright (c) 2017-2021, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
from __future__ import print_function
import os
import sys
import struct
import argparse
from libxed import LibXED
from ctypes import create_string_buffer, addressof
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import perf_set_itrace_options, \
perf_sample_insn, perf_sample_srccode
try:
broken_pipe_exception = BrokenPipeError
except NameError:	# Python 2 has no BrokenPipeError
broken_pipe_exception = IOError
glb_switch_str = None
glb_switch_printed = True
glb_insn = False
glb_disassembler = None
glb_src = False
glb_source_file_name = None
glb_line_number = None
glb_dso = None
def get_optional_null(perf_dict, field):
if field in perf_dict:
return perf_dict[field]
return ""
def get_optional_zero(perf_dict, field):
if field in perf_dict:
return perf_dict[field]
return 0
def get_optional_bytes(perf_dict, field):
if field in perf_dict:
return perf_dict[field]
return bytes()
def get_optional(perf_dict, field):
if field in perf_dict:
return perf_dict[field]
return "[unknown]"
def get_offset(perf_dict, field):
if field in perf_dict:
return "+%#x" % perf_dict[field]
return ""
def trace_begin():
ap = argparse.ArgumentParser(usage = "", add_help = False)
ap.add_argument("--insn-trace", action='store_true')
ap.add_argument("--src-trace", action='store_true')
global glb_args
global glb_insn
global glb_src
glb_args = ap.parse_args()
if glb_args.insn_trace:
print("Intel PT Instruction Trace")
itrace = "i0nsepwx"
glb_insn = True
elif glb_args.src_trace:
print("Intel PT Source Trace")
itrace = "i0nsepwx"
glb_insn = True
glb_src = True
else:
print("Intel PT Branch Trace, Power Events and PTWRITE")
itrace = "bepwx"
global glb_disassembler
try:
glb_disassembler = LibXED()
except:
glb_disassembler = None
perf_set_itrace_options(perf_script_context, itrace)
def trace_end():
print("End")
def trace_unhandled(event_name, context, event_fields_dict):
print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
	f = (data[4] + 500) // 1000
	p = ((cbr * 1000 // data[2]) + 5) // 10
print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
end=' ')
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
print("IP: %u" % (exact_ip), end=' ')
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
print("deepest cstate: %u last cstate: %u wake reason: %#x" %
(deepest_cstate, last_cstate, wake_reason), end=' ')
def print_psb(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
offset = data[1]
print("offset: %#x" % (offset), end=' ')
def common_start_str(comm, sample):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
return "%16s %5u/%-5u [%03u] %9u.%09u " % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000)
def print_common_start(comm, sample, name):
flags_disp = get_optional_null(sample, "flags_disp")
# Unused fields:
# period = sample["period"]
# phys_addr = sample["phys_addr"]
# weight = sample["weight"]
# transaction = sample["transaction"]
# cpumode = get_optional_zero(sample, "cpumode")
print(common_start_str(comm, sample) + "%7s %19s" % (name, flags_disp), end=' ')
def print_instructions_start(comm, sample):
if "x" in get_optional_null(sample, "flags"):
print(common_start_str(comm, sample) + "x", end=' ')
else:
print(common_start_str(comm, sample), end=' ')
def disassem(insn, ip):
inst = glb_disassembler.Instruction()
glb_disassembler.SetMode(inst, 0) # Assume 64-bit
buf = create_string_buffer(64)
buf.value = insn
return glb_disassembler.DisassembleOne(inst, addressof(buf), len(insn), ip)
def print_common_ip(param_dict, sample, symbol, dso):
ip = sample["ip"]
offs = get_offset(param_dict, "symoff")
if "cyc_cnt" in sample:
cyc_cnt = sample["cyc_cnt"]
insn_cnt = get_optional_zero(sample, "insn_cnt")
ipc_str = " IPC: %#.2f (%u/%u)" % (insn_cnt / cyc_cnt, insn_cnt, cyc_cnt)
else:
ipc_str = ""
if glb_insn and glb_disassembler is not None:
insn = perf_sample_insn(perf_script_context)
if insn and len(insn):
cnt, text = disassem(insn, ip)
byte_str = ("%x" % ip).rjust(16)
if sys.version_info.major >= 3:
for k in range(cnt):
byte_str += " %02x" % insn[k]
else:
for k in xrange(cnt):
byte_str += " %02x" % ord(insn[k])
print("%-40s %-30s" % (byte_str, text), end=' ')
print("%s%s (%s)" % (symbol, offs, dso), end=' ')
else:
print("%16x %s%s (%s)" % (ip, symbol, offs, dso), end=' ')
if "addr_correlates_sym" in sample:
addr = sample["addr"]
dso = get_optional(sample, "addr_dso")
symbol = get_optional(sample, "addr_symbol")
offs = get_offset(sample, "addr_symoff")
print("=> %x %s%s (%s)%s" % (addr, symbol, offs, dso, ipc_str))
else:
print(ipc_str)
def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
ip = sample["ip"]
if symbol == "[unknown]":
start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40)
else:
offs = get_offset(param_dict, "symoff")
start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40)
if with_insn and glb_insn and glb_disassembler is not None:
insn = perf_sample_insn(perf_script_context)
if insn and len(insn):
cnt, text = disassem(insn, ip)
start_str += text.ljust(30)
global glb_source_file_name
global glb_line_number
global glb_dso
source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context)
if source_file_name:
if glb_line_number == line_number and glb_source_file_name == source_file_name:
src_str = ""
else:
if len(source_file_name) > 40:
src_file = ("..." + source_file_name[-37:]) + " "
else:
src_file = source_file_name.ljust(41)
if source_line is None:
src_str = src_file + str(line_number).rjust(4) + " <source not found>"
else:
src_str = src_file + str(line_number).rjust(4) + " " + source_line
glb_dso = None
elif dso == glb_dso:
src_str = ""
else:
src_str = dso
glb_dso = dso
glb_line_number = line_number
glb_source_file_name = source_file_name
print(start_str, src_str)
def do_process_event(param_dict):
global glb_switch_printed
if not glb_switch_printed:
print(glb_switch_str)
glb_switch_printed = True
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Unused fields:
# callchain = param_dict["callchain"]
# brstack = param_dict["brstack"]
# brstacksym = param_dict["brstacksym"]
# Symbol and dso info are not always resolved
dso = get_optional(param_dict, "dso")
symbol = get_optional(param_dict, "symbol")
if name[0:12] == "instructions":
if glb_src:
print_srccode(comm, param_dict, sample, symbol, dso, True)
else:
print_instructions_start(comm, sample)
print_common_ip(param_dict, sample, symbol, dso)
elif name[0:8] == "branches":
if glb_src:
print_srccode(comm, param_dict, sample, symbol, dso, False)
else:
print_common_start(comm, sample, name)
print_common_ip(param_dict, sample, symbol, dso)
elif name == "ptwrite":
print_common_start(comm, sample, name)
print_ptwrite(raw_buf)
print_common_ip(param_dict, sample, symbol, dso)
elif name == "cbr":
print_common_start(comm, sample, name)
print_cbr(raw_buf)
print_common_ip(param_dict, sample, symbol, dso)
elif name == "mwait":
print_common_start(comm, sample, name)
print_mwait(raw_buf)
print_common_ip(param_dict, sample, symbol, dso)
elif name == "pwre":
print_common_start(comm, sample, name)
print_pwre(raw_buf)
print_common_ip(param_dict, sample, symbol, dso)
elif name == "exstop":
print_common_start(comm, sample, name)
print_exstop(raw_buf)
print_common_ip(param_dict, sample, symbol, dso)
elif name == "pwrx":
print_common_start(comm, sample, name)
print_pwrx(raw_buf)
print_common_ip(param_dict, sample, symbol, dso)
elif name == "psb":
print_common_start(comm, sample, name)
print_psb(raw_buf)
print_common_ip(param_dict, sample, symbol, dso)
else:
print_common_start(comm, sample, name)
print_common_ip(param_dict, sample, symbol, dso)
def process_event(param_dict):
try:
do_process_event(param_dict)
except broken_pipe_exception:
# Stop python printing broken pipe errors and traceback
sys.stdout = open(os.devnull, 'w')
sys.exit(1)
def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
try:
print("%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" %
("Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip))
except broken_pipe_exception:
# Stop python printing broken pipe errors and traceback
sys.stdout = open(os.devnull, 'w')
sys.exit(1)
def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
global glb_switch_printed
global glb_switch_str
if out:
out_str = "Switch out "
else:
out_str = "Switch In "
if out_preempt:
preempt_str = "preempt"
else:
preempt_str = ""
if machine_pid == -1:
machine_str = ""
else:
machine_str = "machine PID %d" % machine_pid
glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
(out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
glb_switch_printed = False

View File

@@ -0,0 +1,107 @@
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# libxed.py: Python wrapper for libxed.so
# Copyright (c) 2014-2021, Intel Corporation.
# To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
import sys
from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
# XED Disassembler
class xed_state_t(Structure):
_fields_ = [
("mode", c_int),
("width", c_int)
]
class XEDInstruction():
def __init__(self, libxed):
# Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
xedd_t = c_byte * 512
self.xedd = xedd_t()
self.xedp = addressof(self.xedd)
libxed.xed_decoded_inst_zero(self.xedp)
self.state = xed_state_t()
self.statep = addressof(self.state)
# Buffer for disassembled instruction text
self.buffer = create_string_buffer(256)
self.bufferp = addressof(self.buffer)
class LibXED():
def __init__(self):
try:
self.libxed = CDLL("libxed.so")
except:
self.libxed = None
if not self.libxed:
self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
self.xed_tables_init.argtypes = []
self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
self.xed_decoded_inst_zero.restype = None
self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
self.xed_operand_values_set_mode.restype = None
self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
self.xed_decoded_inst_zero_keep_mode.restype = None
self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
self.xed_decode = self.libxed.xed_decode
self.xed_decode.restype = c_int
self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
self.xed_format_context = self.libxed.xed_format_context
self.xed_format_context.restype = c_uint
self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
self.xed_tables_init()
def Instruction(self):
return XEDInstruction(self)
def SetMode(self, inst, mode):
if mode:
inst.state.mode = 4 # 32-bit
inst.state.width = 4 # 4 bytes
else:
inst.state.mode = 1 # 64-bit
inst.state.width = 8 # 8 bytes
self.xed_operand_values_set_mode(inst.xedp, inst.statep)
def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
self.xed_decoded_inst_zero_keep_mode(inst.xedp)
err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
if err:
return 0, ""
# Use AT&T mode (2), alternative is Intel (3)
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
if sys.version_info[0] == 2:
result = inst.buffer.value
else:
result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
return inst.xedd[166], result
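# Illustrative usage (assumes libxed.so is installed; the single-byte NOP
# and the addresses are example values, not from the original script):
#
#   xed = LibXED()
#   inst = xed.Instruction()
#   xed.SetMode(inst, 0)                  # 64-bit mode
#   buf = create_string_buffer(b"\x90")   # one NOP instruction
#   length, text = xed.DisassembleOne(inst, addressof(buf), 1, 0x1000)
#   # expected: length 1 and the disassembled text of the NOP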

View File

@@ -0,0 +1,100 @@
# mem-phys-addr.py: Resolve physical address samples
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2018, Intel Corporation.
from __future__ import division
from __future__ import print_function
import os
import sys
import struct
import re
import bisect
import collections
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# physical address ranges for System RAM
system_ram = []
# physical address ranges for Persistent Memory
pmem = []
# file object for proc iomem
f = None
# count for each type of memory
load_mem_type_cnt = collections.Counter()
# perf event name
event_name = None
def parse_iomem():
global f
f = open('/proc/iomem', 'r')
for i, j in enumerate(f):
m = re.split('-|:',j,2)
if m[2].strip() == 'System RAM':
system_ram.append(int(m[0], 16))
system_ram.append(int(m[1], 16))
if m[2].strip() == 'Persistent Memory':
pmem.append(int(m[0], 16))
pmem.append(int(m[1], 16))
def print_memory_type():
print("Event: %s" % (event_name))
print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
key = lambda kv: (kv[1], kv[0]), reverse = True):
print("%-40s %10d %10.1f%%\n" %
(mem_type, count, 100 * count / total),
end='')
def trace_begin():
parse_iomem()
def trace_end():
print_memory_type()
f.close()
def is_system_ram(phys_addr):
	# /proc/iomem is sorted
position = bisect.bisect(system_ram, phys_addr)
if position % 2 == 0:
return False
return True
def is_persistent_mem(phys_addr):
position = bisect.bisect(pmem, phys_addr)
if position % 2 == 0:
return False
return True
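# Worked example (made-up ranges, for illustration): with system_ram =
# [0x1000, 0x1fff, 0x3000, 0x3fff], bisect.bisect(system_ram, 0x1800)
# returns 1; the odd position means 0x1800 lies inside a System RAM range.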
def find_memory_type(phys_addr):
if phys_addr == 0:
return "N/A"
if is_system_ram(phys_addr):
return "System RAM"
if is_persistent_mem(phys_addr):
return "Persistent Memory"
	# slow path: search all of /proc/iomem
f.seek(0, 0)
for j in f:
m = re.split('-|:',j,2)
if int(m[0], 16) <= phys_addr <= int(m[1], 16):
return m[2]
return "N/A"
def process_event(param_dict):
name = param_dict["ev_name"]
sample = param_dict["sample"]
phys_addr = sample["phys_addr"]
global event_name
	if event_name is None:
event_name = name
load_mem_type_cnt[find_memory_type(phys_addr)] += 1

View File

@@ -0,0 +1,78 @@
# Monitor the system for dropped packets and produce a report of drop locations and counts
# SPDX-License-Identifier: GPL-2.0
from __future__ import print_function
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
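# Worked example (made-up table, for illustration): with kallsyms =
# [(0x1000, "a"), (0x2000, "b")], get_sym(0x1800) returns ("a", 0x800),
# i.e. symbol "a" plus an offset of 0x800.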
def print_drop_table():
print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
for i in drop_log.keys():
(sym, off) = get_sym(i)
		if sym is None:
sym = i
print("%25s %25s %25s" % (sym, off, drop_log[i]))
def trace_begin():
print("Starting trace (Ctrl-C to dump results)")
def trace_end():
print("Gathering kallsyms data")
get_kallsyms_table()
print_drop_table()
# Called from perf when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, location, protocol):
slocation = str(location)
	try:
		drop_log[slocation] = drop_log[slocation] + 1
	except KeyError:
		drop_log[slocation] = 1

View File

@@ -0,0 +1,462 @@
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from __future__ import print_function
import os
import sys
from collections import defaultdict
try:
from UserList import UserList
except ImportError:
# Python 3: UserList moved to the collections package
from collections import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
			i = (end + start) // 2	# integer division: i is used as a list index
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in range(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass


@@ -0,0 +1,89 @@
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
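#
# Illustrative invocations (the comm "firefox" here is just an example):
#
#   perf script -s sctop.py              # all comms, 3 second refresh
#   perf script -s sctop.py 5            # all comms, 5 second refresh
#   perf script -s sctop.py firefox 5    # only firefox, 5 second refresh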
from __future__ import print_function
import os, sys, time
try:
import thread
except ImportError:
import _thread as thread
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
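# autodict (from Util) auto-vivifies missing keys as nested dicts, so the
# first increment of a new id raises TypeError; the handler below catches
# that and seeds the counter to 1.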
def trace_begin():
	thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
	while True:
clear_term()
if for_comm is not None:
print("\nsyscall events for %s:\n" % (for_comm))
else:
print("\nsyscall events:\n")
print("%-40s %10s" % ("event", "count"))
print("%-40s %10s" %
("----------------------------------------",
"----------"))
for id, val in sorted(syscalls.items(),
key = lambda kv: (kv[1], kv[0]),
reverse = True):
try:
print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
pass
syscalls.clear()
time.sleep(interval)


@@ -0,0 +1,75 @@
# system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
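#
# Illustrative invocations (the comm and pid below are just examples):
#
#   perf script -s syscall-counts-by-pid.py           # all comms and pids
#   perf script -s syscall-counts-by-pid.py firefox   # only one comm
#   perf script -s syscall-counts-by-pid.py 1234      # only one pid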
from __future__ import print_function
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
	except ValueError:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
	if (for_comm and common_comm != for_comm) or \
	   (for_pid is not None and common_pid != for_pid):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print("\nsyscall events for %s:\n" % (for_comm))
else:
print("\nsyscall events by comm/pid:\n")
print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
print("%-40s %10s" % ("----------------------------------------",
"----------"))
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print("\n%s [%d]" % (comm, pid))
for id, val in sorted(syscalls[comm][pid].items(),
key = lambda kv: (kv[1], kv[0]), reverse = True):
print(" %-38s %10d" % (syscall_name(id), val))

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,615 @@
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
# -*- coding: utf-8 -*-
#
""" This utility can be used to debug and tune the performance of the
intel_pstate driver. This utility can be used in two ways:
- If there is Linux trace file with pstate_sample events enabled, then
this utility can parse the trace file and generate performance plots.
- If user has not specified a trace file as input via command line parameters,
then this utility enables and collects trace data for a user specified interval
and generates performance plots.
Prerequisites:
Python version 2.7.x or higher
gnuplot 5.0 or higher
gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
 gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
HWP (Hardware P-States) disabled
Kernel config for Linux trace is enabled
see print_help(): for Usage and Output details
"""
from __future__ import print_function
from datetime import datetime
import subprocess
import os
import time
import re
import signal
import sys
import getopt
import Gnuplot
from numpy import *
from decimal import *
__author__ = "Srinivas Pandruvada"
__copyright__ = " Copyright (c) 2017, Intel Corporation. "
__license__ = "GPL version 2"
MAX_CPUS = 256
# Define the csv file columns
C_COMM = 18
C_GHZ = 17
C_ELAPSED = 16
C_SAMPLE = 15
C_DURATION = 14
C_LOAD = 13
C_BOOST = 12
C_FREQ = 11
C_TSC = 10
C_APERF = 9
C_MPERF = 8
C_TO = 7
C_FROM = 6
C_SCALED = 5
C_CORE = 4
C_USEC = 3
C_SEC = 2
C_CPU = 1
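# These are 1-based column indices into cpu.csv (matching the header that
# cleanup_data_files() writes), so they can be passed straight to gnuplot's
# 1-based "using" directive.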
global sample_num, last_sec_cpu, last_usec_cpu, start_time, testname
# 11 digits covers uptime to 115 days
getcontext().prec = 11
sample_num = 0
last_sec_cpu = [0] * MAX_CPUS
last_usec_cpu = [0] * MAX_CPUS
def print_help():
print('intel_pstate_tracer.py:')
print(' Usage:')
print(' If the trace file is available, then to simply parse and plot, use (sudo not required):')
print(' ./intel_pstate_tracer.py [-c cpus] -t <trace_file> -n <test_name>')
print(' Or')
    print('    ./intel_pstate_tracer.py [--cpu cpus] --trace_file <trace_file> --name <test_name>')
print(' To generate trace file, parse and plot, use (sudo required):')
print(' sudo ./intel_pstate_tracer.py [-c cpus] -i <interval> -n <test_name> -m <kbytes>')
print(' Or')
print(' sudo ./intel_pstate_tracer.py [--cpu cpus] --interval <interval> --name <test_name> --memory <kbytes>')
print(' Optional argument:')
print(' cpus: comma separated list of CPUs')
print(' kbytes: Kilo bytes of memory per CPU to allocate to the trace buffer. Default: 10240')
print(' Output:')
print(' If not already present, creates a "results/test_name" folder in the current working directory with:')
    print('      cpu.csv - comma separated values file with trace contents and some additional calculations.')
    print('      cpu???.csv - comma separated values file for CPU number ???.')
print(' *.png - a variety of PNG format plot files created from the trace contents and the additional calculations.')
print(' Notes:')
print(' Avoid the use of _ (underscore) in test names, because in gnuplot it is a subscript directive.')
    print('    Maximum number of CPUs is {0:d}. If there are more, the script will abort with an error.'.format(MAX_CPUS))
print(' Off-line CPUs cause the script to list some warnings, and create some empty files. Use the CPU mask feature for a clean run.')
print(' Empty y range warnings for autoscaled plots can occur and can be ignored.')
def plot_perf_busy_with_sample(cpu_index):
""" Plot method to per cpu information """
file_name = 'cpu{:0>3}.csv'.format(cpu_index)
if os.path.exists(file_name):
output_png = "cpu%03d_perf_busy_vs_samples.png" % cpu_index
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y1 range
g_plot('set y2range [0:200]')
g_plot('set y2tics 0, 10')
g_plot('set title "{} : cpu perf busy vs. sample : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
# Override common
g_plot('set xlabel "Samples"')
g_plot('set ylabel "P-State"')
g_plot('set y2label "Scaled Busy/performance/io-busy(%)"')
set_4_plot_linestyles(g_plot)
g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_SAMPLE, C_CORE))
g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_SAMPLE, C_SCALED))
g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_SAMPLE, C_BOOST))
g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_SAMPLE, C_TO))
def plot_perf_busy(cpu_index):
""" Plot some per cpu information """
file_name = 'cpu{:0>3}.csv'.format(cpu_index)
if os.path.exists(file_name):
output_png = "cpu%03d_perf_busy.png" % cpu_index
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y1 range
g_plot('set y2range [0:200]')
g_plot('set y2tics 0, 10')
g_plot('set title "{} : perf busy : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
g_plot('set ylabel "P-State"')
g_plot('set y2label "Scaled Busy/performance/io-busy(%)"')
set_4_plot_linestyles(g_plot)
g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_ELAPSED, C_CORE))
g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_ELAPSED, C_SCALED))
g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_ELAPSED, C_BOOST))
g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_ELAPSED, C_TO))
def plot_durations(cpu_index):
""" Plot per cpu durations """
file_name = 'cpu{:0>3}.csv'.format(cpu_index)
if os.path.exists(file_name):
output_png = "cpu%03d_durations.png" % cpu_index
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y range
g_plot('set title "{} : durations : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
g_plot('set ylabel "Timer Duration (MilliSeconds)"')
# override common
g_plot('set key off')
set_4_plot_linestyles(g_plot)
g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_DURATION))
def plot_loads(cpu_index):
""" Plot per cpu loads """
file_name = 'cpu{:0>3}.csv'.format(cpu_index)
if os.path.exists(file_name):
output_png = "cpu%03d_loads.png" % cpu_index
g_plot = common_all_gnuplot_settings(output_png)
g_plot('set yrange [0:100]')
g_plot('set ytics 0, 10')
g_plot('set title "{} : loads : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
g_plot('set ylabel "CPU load (percent)"')
# override common
g_plot('set key off')
set_4_plot_linestyles(g_plot)
g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD))
def plot_pstate_cpu_with_sample():
""" Plot all cpu information """
if os.path.exists('cpu.csv'):
output_png = 'all_cpu_pstates_vs_samples.png'
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y range
# override common
g_plot('set xlabel "Samples"')
g_plot('set ylabel "P-State"')
g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def plot_pstate_cpu():
""" Plot all cpu information from csv files """
output_png = 'all_cpu_pstates.png'
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y range
g_plot('set ylabel "P-State"')
g_plot('set title "{} : cpu pstates : {:%F %H:%M}"'.format(testname, datetime.now()))
# the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
# plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
#
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def plot_load_cpu():
""" Plot all cpu loads """
output_png = 'all_cpu_loads.png'
g_plot = common_all_gnuplot_settings(output_png)
g_plot('set yrange [0:100]')
g_plot('set ylabel "CPU load (percent)"')
g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def plot_frequency_cpu():
""" Plot all cpu frequencies """
output_png = 'all_cpu_frequencies.png'
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y range
g_plot('set ylabel "CPU Frequency (GHz)"')
g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def plot_duration_cpu():
""" Plot all cpu durations """
output_png = 'all_cpu_durations.png'
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y range
g_plot('set ylabel "Timer Duration (MilliSeconds)"')
g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def plot_scaled_cpu():
""" Plot all cpu scaled busy """
output_png = 'all_cpu_scaled.png'
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y range
g_plot('set ylabel "Scaled Busy (Unitless)"')
g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def plot_boost_cpu():
""" Plot all cpu IO Boosts """
output_png = 'all_cpu_boost.png'
g_plot = common_all_gnuplot_settings(output_png)
g_plot('set yrange [0:100]')
g_plot('set ylabel "CPU IO Boost (percent)"')
g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def plot_ghz_cpu():
""" Plot all cpu tsc ghz """
output_png = 'all_cpu_ghz.png'
g_plot = common_all_gnuplot_settings(output_png)
# autoscale this one, no set y range
g_plot('set ylabel "TSC Frequency (GHz)"')
g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
def common_all_gnuplot_settings(output_png):
""" common gnuplot settings for multiple CPUs one one graph. """
g_plot = common_gnuplot_settings()
g_plot('set output "' + output_png + '"')
return(g_plot)
def common_gnuplot_settings():
""" common gnuplot settings. """
g_plot = Gnuplot.Gnuplot(persist=1)
# The following line is for rigor only. It seems to be assumed for .csv files
g_plot('set datafile separator \",\"')
g_plot('set ytics nomirror')
g_plot('set xtics nomirror')
g_plot('set xtics font ", 10"')
g_plot('set ytics font ", 10"')
g_plot('set tics out scale 1.0')
g_plot('set grid')
g_plot('set key out horiz')
g_plot('set key bot center')
g_plot('set key samplen 2 spacing .8 font ", 9"')
g_plot('set term png size 1200, 600')
g_plot('set title font ", 11"')
g_plot('set ylabel font ", 10"')
g_plot('set xlabel font ", 10"')
g_plot('set xlabel offset 0, 0.5')
g_plot('set xlabel "Elapsed Time (Seconds)"')
return(g_plot)
def set_4_plot_linestyles(g_plot):
""" set the linestyles used for 4 plots in 1 graphs. """
g_plot('set style line 1 linetype 1 linecolor rgb "green" pointtype -1')
g_plot('set style line 2 linetype 1 linecolor rgb "red" pointtype -1')
g_plot('set style line 3 linetype 1 linecolor rgb "purple" pointtype -1')
g_plot('set style line 4 linetype 1 linecolor rgb "blue" pointtype -1')
def store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz):
""" Store master csv file information """
global graph_data_present
if cpu_mask[cpu_int] == 0:
return
try:
f_handle = open('cpu.csv', 'a')
string_buffer = "CPU_%03u, %05u, %06u, %u, %u, %u, %u, %u, %u, %u, %.4f, %u, %.2f, %.3f, %u, %.3f, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(core_busy), int(scaled), int(_from), int(_to), int(mperf), int(aperf), int(tsc), freq_ghz, int(io_boost), load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm)
        f_handle.write(string_buffer)
        f_handle.close()
    except IOError:
        print('IO error cpu.csv')
        return
    graph_data_present = True
def split_csv():
""" seperate the all csv file into per CPU csv files. """
global current_max_cpu
if os.path.exists('cpu.csv'):
for index in range(0, current_max_cpu + 1):
if cpu_mask[int(index)] != 0:
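                # The first grep copies the single header row into the per
                # CPU file; the second appends that CPU's data rows, which
                # store_csv() tagged CPU_nnn in column one.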
os.system('grep -m 1 common_cpu cpu.csv > cpu{:0>3}.csv'.format(index))
os.system('grep CPU_{:0>3} cpu.csv >> cpu{:0>3}.csv'.format(index, index))
def fix_ownership(path):
"""Change the owner of the file to SUDO_UID, if required"""
uid = os.environ.get('SUDO_UID')
gid = os.environ.get('SUDO_GID')
if uid is not None:
os.chown(path, int(uid), int(gid))
def cleanup_data_files():
""" clean up existing data files """
if os.path.exists('cpu.csv'):
os.remove('cpu.csv')
f_handle = open('cpu.csv', 'a')
f_handle.write('common_cpu, common_secs, common_usecs, core_busy, scaled_busy, from, to, mperf, aperf, tsc, freq, boost, load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm')
f_handle.write('\n')
f_handle.close()
def clear_trace_file():
""" Clear trace file """
try:
f_handle = open('/sys/kernel/debug/tracing/trace', 'w')
f_handle.close()
    except IOError:
        print('IO error clearing trace file')
sys.exit(2)
def enable_trace():
""" Enable trace """
    try:
        with open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable', 'w') as fp:
            fp.write("1")
    except IOError:
        print('IO error enabling trace')
        sys.exit(2)
def disable_trace():
""" Disable trace """
    try:
        with open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable', 'w') as fp:
            fp.write("0")
    except IOError:
        print('IO error disabling trace')
        sys.exit(2)
def set_trace_buffer_size():
""" Set trace buffer size """
try:
with open('/sys/kernel/debug/tracing/buffer_size_kb', 'w') as fp:
fp.write(memory)
    except IOError:
        print('IO error setting trace buffer size')
sys.exit(2)
def free_trace_buffer():
""" Free the trace buffer memory """
    try:
        with open('/sys/kernel/debug/tracing/buffer_size_kb', 'w') as fp:
            fp.write("1")
    except IOError:
        print('IO error freeing trace buffer')
        sys.exit(2)
def read_trace_data(filename):
""" Read and parse trace data """
global current_max_cpu
global sample_num, last_sec_cpu, last_usec_cpu, start_time
    try:
        with open(filename, 'r') as fp:
            data = fp.read()
    except IOError:
        print('Error opening ', filename)
        sys.exit(2)
for line in data.splitlines():
search_obj = \
re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?core_busy=)(\d+)(.*?scaled=)(\d+)(.*?from=)(\d+)(.*?to=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)(.*?freq=)(\d+)'
, line)
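        # A hypothetical trace line this regex is meant to match:
        #   <idle>-0 [003] 1234.567890: pstate_sample: core_busy=25 scaled=20 from=10 to=12 mperf=1000 aperf=800 tsc=24000 freq=1200000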
if search_obj:
cpu = search_obj.group(3)
cpu_int = int(cpu)
cpu = str(cpu_int)
time_pre_dec = search_obj.group(6)
time_post_dec = search_obj.group(8)
core_busy = search_obj.group(10)
scaled = search_obj.group(12)
_from = search_obj.group(14)
_to = search_obj.group(16)
mperf = search_obj.group(18)
aperf = search_obj.group(20)
tsc = search_obj.group(22)
freq = search_obj.group(24)
common_comm = search_obj.group(2).replace(' ', '')
# Not all kernel versions have io_boost field
io_boost = '0'
search_obj = re.search(r'.*?io_boost=(\d+)', line)
if search_obj:
io_boost = search_obj.group(1)
if sample_num == 0 :
start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)
sample_num += 1
if last_sec_cpu[cpu_int] == 0 :
last_sec_cpu[cpu_int] = time_pre_dec
last_usec_cpu[cpu_int] = time_post_dec
            else :
                duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))
                duration_ms = Decimal(duration_us) / Decimal(1000)
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
                elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time
                load = Decimal(int(mperf)*100) / Decimal(tsc)
                freq_ghz = Decimal(freq) / Decimal(1000000)
                # Sanity check calculation, typically anomalies indicate missed samples
                # However, check for 0 (should never occur)
                tsc_ghz = Decimal(0)
                if duration_ms != Decimal(0) :
                    tsc_ghz = Decimal(tsc) / duration_ms / Decimal(1000000)
                store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz)
if cpu_int > current_max_cpu:
current_max_cpu = cpu_int
# End of for each trace line loop
    # Now separate the main overall csv file into per CPU csv files.
split_csv()
def signal_handler(signal, frame):
print(' SIGINT: Forcing cleanup before exit.')
if interval:
disable_trace()
clear_trace_file()
# Free the memory
free_trace_buffer()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
interval = ""
filename = ""
cpu_list = ""
testname = ""
memory = "10240"
graph_data_present = False
valid1 = False
valid2 = False
cpu_mask = zeros((MAX_CPUS,), dtype=int)
try:
opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:m:",["help","trace_file=","interval=","cpu=","name=","memory="])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
        print_help()
sys.exit()
elif opt in ("-t", "--trace_file"):
valid1 = True
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
filename = os.path.join(location, arg)
elif opt in ("-i", "--interval"):
valid1 = True
interval = arg
elif opt in ("-c", "--cpu"):
cpu_list = arg
elif opt in ("-n", "--name"):
valid2 = True
testname = arg
elif opt in ("-m", "--memory"):
memory = arg
if not (valid1 and valid2):
print_help()
sys.exit()
if cpu_list:
for p in re.split("[,]", cpu_list):
if int(p) < MAX_CPUS :
cpu_mask[int(p)] = 1
else:
for i in range (0, MAX_CPUS):
cpu_mask[i] = 1
if not os.path.exists('results'):
os.mkdir('results')
# The regular user needs to own the directory, not root.
fix_ownership('results')
os.chdir('results')
if os.path.exists(testname):
print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.')
sys.exit()
os.mkdir(testname)
# The regular user needs to own the directory, not root.
fix_ownership(testname)
os.chdir(testname)
# Temporary (or perhaps not)
cur_version = sys.version_info
print('python version (should be >= 2.7):')
print(cur_version)
# Left as "cleanup" for potential future re-run ability.
cleanup_data_files()
if interval:
filename = "/sys/kernel/debug/tracing/trace"
clear_trace_file()
set_trace_buffer_size()
enable_trace()
print('Sleeping for ', interval, 'seconds')
time.sleep(int(interval))
disable_trace()
current_max_cpu = 0
read_trace_data(filename)
if interval:
clear_trace_file()
# Free the memory
free_trace_buffer()
if not graph_data_present:
print('No valid data to plot')
sys.exit(2)
for cpu_no in range(0, current_max_cpu + 1):
plot_perf_busy_with_sample(cpu_no)
plot_perf_busy(cpu_no)
plot_durations(cpu_no)
plot_loads(cpu_no)
plot_pstate_cpu_with_sample()
plot_pstate_cpu()
plot_load_cpu()
plot_frequency_cpu()
plot_duration_cpu()
plot_scaled_cpu()
plot_boost_cpu()
plot_ghz_cpu()
# It is preferable, but not necessary, that the regular user owns the files, not root.
for root, dirs, files in os.walk('.'):
for f in files:
fix_ownership(f)
os.chdir('../../')


@@ -0,0 +1,46 @@
#!/usr/bin/env drgn
# SPDX-License-Identifier: GPL-2.0+
#
# Dump out the number of RCU callbacks outstanding.
#
# On older kernels having multiple flavors of RCU, this dumps out the
# number of callbacks for the most heavily used flavor.
#
# Usage: sudo drgn rcu-cbs.py
#
# Copyright (C) 2021 Facebook, Inc.
#
# Authors: Paul E. McKenney <paulmck@kernel.org>
import sys
import drgn
from drgn import NULL, Object
from drgn.helpers.linux import *
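# Try the flavor-specific per-CPU variables used by older kernels first
# (rcu_preempt_data on preemptible kernels, rcu_sched_data otherwise) and
# fall back to the consolidated rcu_data used by current kernels.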
def get_rdp0(prog):
try:
rdp0 = prog.variable('rcu_preempt_data', 'kernel/rcu/tree.c');
except LookupError:
rdp0 = NULL;
if rdp0 == NULL:
try:
rdp0 = prog.variable('rcu_sched_data',
'kernel/rcu/tree.c');
except LookupError:
rdp0 = NULL;
if rdp0 == NULL:
rdp0 = prog.variable('rcu_data', 'kernel/rcu/tree.c');
return rdp0.address_of_();
rdp0 = get_rdp0(prog);
# Sum up RCU callbacks.
sum = 0;
for cpu in for_each_possible_cpu(prog):
rdp = per_cpu_ptr(rdp0, cpu);
len = rdp.cblist.len.value_();
# print("CPU " + str(cpu) + " RCU callbacks: " + str(len));
sum += len;
print("Number of RCU callbacks in flight: " + str(sum));