#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 """ tdc.py - Linux tc (Traffic Control) unit test driver Copyright (C) 2017 Lucas Bates <[email protected]> """ import re import os import sys import argparse import importlib import json import subprocess import time import traceback from collections import OrderedDict from string import Template from tdc_config import * from tdc_helper import * import TdcPlugin from TdcResults import * class PluginDependencyException(Exception): def __init__(self, missing_pg): self.missing_pg = missing_pg class PluginMgrTestFail(Exception): def __init__(self, stage, output, message): self.stage = stage self.output = output self.message = message class PluginMgr: def __init__(self, argparser): super().__init__() self.plugins = {} self.plugin_instances = [] self.failed_plugins = {} self.argparser = argparser # TODO, put plugins in order plugindir = os.getenv('TDC_PLUGIN_DIR', './plugins') for dirpath, dirnames, filenames in os.walk(plugindir): for fn in filenames: if (fn.endswith('.py') and not fn == '__init__.py' and not fn.startswith('#') and not fn.startswith('.#')): mn = fn[0:-3] foo = importlib.import_module('plugins.' + mn) self.plugins[mn] = foo self.plugin_instances.append(foo.SubPlugin()) def load_plugin(self, pgdir, pgname): pgname = pgname[0:-3] foo = importlib.import_module('{}.{}'.format(pgdir, pgname)) self.plugins[pgname] = foo self.plugin_instances.append(foo.SubPlugin()) self.plugin_instances[-1].check_args(self.args, None) def get_required_plugins(self, testlist): ''' Get all required plugins from the list of test cases and return all unique items. ''' reqs = [] for t in testlist: try: if 'requires' in t['plugins']: if isinstance(t['plugins']['requires'], list): reqs.extend(t['plugins']['requires']) else: reqs.append(t['plugins']['requires']) except KeyError: continue reqs = get_unique_item(reqs) return reqs def load_required_plugins(self, reqs, parser, args, remaining): ''' Get all required plugins from the list of test cases and load any plugin that is not already enabled. ''' pgd = ['plugin-lib', 'plugin-lib-custom'] pnf = [] for r in reqs: if r not in self.plugins: fname = '{}.py'.format(r) source_path = [] for d in pgd: pgpath = '{}/{}'.format(d, fname) if os.path.isfile(pgpath): source_path.append(pgpath) if len(source_path) == 0: print('ERROR: unable to find required plugin {}'.format(r)) pnf.append(fname) continue elif len(source_path) > 1: print('WARNING: multiple copies of plugin {} found, using version found') print('at {}'.format(source_path[0])) pgdir = source_path[0] pgdir = pgdir.split('/')[0] self.load_plugin(pgdir, fname) if len(pnf) > 0: raise PluginDependencyException(pnf) parser = self.call_add_args(parser) (args, remaining) = parser.parse_known_args(args=remaining, namespace=args) return args def call_pre_suite(self, testcount, testidlist): for pgn_inst in self.plugin_instances: pgn_inst.pre_suite(testcount, testidlist) def call_post_suite(self, index): for pgn_inst in reversed(self.plugin_instances): pgn_inst.post_suite(index) def call_pre_case(self, caseinfo, *, test_skip=False): for pgn_inst in self.plugin_instances: try: pgn_inst.pre_case(caseinfo, test_skip) except Exception as ee: print('exception {} in call to pre_case for {} plugin'. 
format(ee, pgn_inst.__class__)) print('test_ordinal is {}'.format(test_ordinal)) print('testid is {}'.format(caseinfo['id'])) raise def call_post_case(self): for pgn_inst in reversed(self.plugin_instances): pgn_inst.post_case() def call_pre_execute(self): for pgn_inst in self.plugin_instances: pgn_inst.pre_execute() def call_post_execute(self): for pgn_inst in reversed(self.plugin_instances): pgn_inst.post_execute() def call_add_args(self, parser): for pgn_inst in self.plugin_instances: parser = pgn_inst.add_args(parser) return parser def call_check_args(self, args, remaining): for pgn_inst in self.plugin_instances: pgn_inst.check_args(args, remaining) def call_adjust_command(self, stage, command): for pgn_inst in self.plugin_instances: command = pgn_inst.adjust_command(stage, command) return command def set_args(self, args): self.args = args @staticmethod def _make_argparser(args): self.argparser = argparse.ArgumentParser( description='Linux TC unit tests') def replace_keywords(cmd): """ For a given executable command, substitute any known variables contained within NAMES with the correct values """ tcmd = Template(cmd) subcmd = tcmd.safe_substitute(NAMES) return subcmd def exec_cmd(args, pm, stage, command): """ Perform any required modifications on an executable command, then run it in a subprocess and return the results. """ if len(command.strip()) == 0: return None, None if '$' in command: command = replace_keywords(command) command = pm.call_adjust_command(stage, command) if args.verbose > 0: print('command "{}"'.format(command)) proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=ENVIR) try: (rawout, serr) = proc.communicate(timeout=NAMES['TIMEOUT']) if proc.returncode != 0 and len(serr) > 0: foutput = serr.decode("utf-8", errors="ignore") else: foutput = rawout.decode("utf-8", errors="ignore") except subprocess.TimeoutExpired: foutput = "Command \"{}\" timed out\n".format(command) proc.returncode = 255 proc.stdout.close() proc.stderr.close() return proc, foutput def prepare_env(args, pm, stage, prefix, cmdlist, output = None): """ Execute the setup/teardown commands for a test case. Optionally terminate test execution if the command fails. 
""" if args.verbose > 0: print('{}'.format(prefix)) for cmdinfo in cmdlist: if isinstance(cmdinfo, list): exit_codes = cmdinfo[1:] cmd = cmdinfo[0] else: exit_codes = [0] cmd = cmdinfo if not cmd: continue (proc, foutput) = exec_cmd(args, pm, stage, cmd) if proc and (proc.returncode not in exit_codes): print('', file=sys.stderr) print("{} *** Could not execute: \"{}\"".format(prefix, cmd), file=sys.stderr) print("\n{} *** Error message: \"{}\"".format(prefix, foutput), file=sys.stderr) print("returncode {}; expected {}".format(proc.returncode, exit_codes)) print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr) print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr) print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr) raise PluginMgrTestFail( stage, output, '"{}" did not complete successfully'.format(prefix)) def run_one_test(pm, args, index, tidx): global NAMES result = True tresult = "" tap = "" res = TestResult(tidx['id'], tidx['name']) if args.verbose > 0: print("\t====================\n=====> ", end="") print("Test " + tidx["id"] + ": " + tidx["name"]) if 'skip' in tidx: if tidx['skip'] == 'yes': res = TestResult(tidx['id'], tidx['name']) res.set_result(ResultState.skip) res.set_errormsg('Test case designated as skipped.') pm.call_pre_case(tidx, test_skip=True) pm.call_post_execute() return res # populate NAMES with TESTID for this test NAMES['TESTID'] = tidx['id'] pm.call_pre_case(tidx) prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"]) if (args.verbose > 0): print('-----> execute stage') pm.call_pre_execute() (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"]) if p: exit_code = p.returncode else: exit_code = None pm.call_post_execute() if (exit_code is None or exit_code != int(tidx["expExitCode"])): print("exit: {!r}".format(exit_code)) print("exit: {}".format(int(tidx["expExitCode"]))) #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"]))) res.set_result(ResultState.fail) res.set_failmsg('Command exited with {}, expected {}\n{}'.format(exit_code, tidx["expExitCode"], procout)) print(procout) else: if args.verbose > 0: print('-----> verify stage') match_pattern = re.compile( str(tidx["matchPattern"]), re.DOTALL | re.MULTILINE) (p, procout) = exec_cmd(args, pm, 'verify', tidx["verifyCmd"]) if procout: match_index = re.findall(match_pattern, procout) if len(match_index) != int(tidx["matchCount"]): res.set_result(ResultState.fail) res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout)) else: res.set_result(ResultState.success) elif int(tidx["matchCount"]) != 0: res.set_result(ResultState.fail) res.set_failmsg('No output generated by verify command.') else: res.set_result(ResultState.success) prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout) pm.call_post_case() index += 1 # remove TESTID from NAMES del(NAMES['TESTID']) return res def test_runner(pm, args, filtered_tests): """ Driver function for the unit tests. Prints information about the tests being run, executes the setup and teardown commands and the command under test itself. Also determines success/failure based on the information in the test case and generates TAP output accordingly. 
""" testlist = filtered_tests tcount = len(testlist) index = 1 tap = '' badtest = None stage = None emergency_exit = False emergency_exit_message = '' tsr = TestSuiteReport() try: pm.call_pre_suite(tcount, [tidx['id'] for tidx in testlist]) except Exception as ee: ex_type, ex, ex_tb = sys.exc_info() print('Exception {} {} (caught in pre_suite).'. format(ex_type, ex)) traceback.print_tb(ex_tb) emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex) emergency_exit = True stage = 'pre-SUITE' if emergency_exit: pm.call_post_suite(index) return emergency_exit_message if args.verbose > 1: print('give test rig 2 seconds to stabilize') time.sleep(2) for tidx in testlist: if "flower" in tidx["category"] and args.device == None: errmsg = "Tests using the DEV2 variable must define the name of a " errmsg += "physical NIC with the -d option when running tdc.\n" errmsg += "Test has been skipped." if args.verbose > 1: print(errmsg) res = TestResult(tidx['id'], tidx['name']) res.set_result(ResultState.skip) res.set_errormsg(errmsg) tsr.add_resultdata(res) continue try: badtest = tidx # in case it goes bad res = run_one_test(pm, args, index, tidx) tsr.add_resultdata(res) except PluginMgrTestFail as pmtf: ex_type, ex, ex_tb = sys.exc_info() stage = pmtf.stage message = pmtf.message output = pmtf.output res = TestResult(tidx['id'], tidx['name']) res.set_result(ResultState.skip) res.set_errormsg(pmtf.message) res.set_failmsg(pmtf.output) tsr.add_resultdata(res) index += 1 print(message) print('Exception {} {} (caught in test_runner, running test {} {} {} stage {})'. format(ex_type, ex, index, tidx['id'], tidx['name'], stage)) print('---------------') print('traceback') traceback.print_tb(ex_tb) print('---------------') if stage == 'teardown': print('accumulated output for this test:') if pmtf.output: print(pmtf.output) print('---------------') break index += 1 # if we failed in setup or teardown, # fill in the remaining tests with ok-skipped count = index if tcount + 1 != count: for tidx in testlist[count - 1:]: res = TestResult(tidx['id'], tidx['name']) res.set_result(ResultState.skip) msg = 'skipped - previous {} failed {} {}'.format(stage, index, badtest.get('id', '--Unknown--')) res.set_errormsg(msg) tsr.add_resultdata(res) count += 1 if args.pause: print('Want to pause\nPress enter to continue ...') if input(sys.stdin): print('got something on stdin') pm.call_post_suite(index) return tsr def has_blank_ids(idlist): """ Search the list for empty ID fields and return true/false accordingly. """ return not(all(k for k in idlist)) def load_from_file(filename): """ Open the JSON file containing the test cases and return them as list of ordered dictionary objects. """ try: with open(filename) as test_data: testlist = json.load(test_data, object_pairs_hook=OrderedDict) except json.JSONDecodeError as jde: print('IGNORING test case file {}\n\tBECAUSE: {}'.format(filename, jde)) testlist = list() else: idlist = get_id_list(testlist) if (has_blank_ids(idlist)): for k in testlist: k['filename'] = filename return testlist def args_parse(): """ Create the argument parser. """ parser = argparse.ArgumentParser(description='Linux TC unit tests') return parser def set_args(parser): """ Set the command line arguments for tdc. """ parser.add_argument( '--outfile', type=str, help='Path to the file in which results should be saved. 
' + 'Default target is the current directory.') parser.add_argument( '-p', '--path', type=str, help='The full path to the tc executable to use') sg = parser.add_argument_group( 'selection', 'select which test cases: ' + 'files plus directories; filtered by categories plus testids') ag = parser.add_argument_group( 'action', 'select action to perform on selected test cases') sg.add_argument( '-D', '--directory', nargs='+', metavar='DIR', help='Collect tests from the specified directory(ies) ' + '(default [tc-tests])') sg.add_argument( '-f', '--file', nargs='+', metavar='FILE', help='Run tests from the specified file(s)') sg.add_argument( '-c', '--category', nargs='*', metavar='CATG', default=['+c'], help='Run tests only from the specified category/ies, ' + 'or if no category/ies is/are specified, list known categories.') sg.add_argument( '-e', '--execute', nargs='+', metavar='ID', help='Execute the specified test cases with specified IDs') ag.add_argument( '-l', '--list', action='store_true', help='List all test cases, or those only within the specified category') ag.add_argument( '-s', '--show', action='store_true', dest='showID', help='Display the selected test cases') ag.add_argument( '-i', '--id', action='store_true', dest='gen_id', help='Generate ID numbers for new test cases') parser.add_argument( '-v', '--verbose', action='count', default=0, help='Show the commands that are being run') parser.add_argument( '--format', default='tap', const='tap', nargs='?', choices=['none', 'xunit', 'tap'], help='Specify the format for test results. (Default: TAP)') parser.add_argument('-d', '--device', help='Execute test cases that use a physical device, ' + 'where DEVICE is its name. (If not defined, tests ' + 'that require a physical device will be skipped)') parser.add_argument( '-P', '--pause', action='store_true', help='Pause execution just before post-suite stage') return parser def check_default_settings(args, remaining, pm): """ Process any arguments overriding the default settings, and ensure the settings are correct. """ # Allow for overriding specific settings global NAMES if args.path != None: NAMES['TC'] = args.path if args.device != None: NAMES['DEV2'] = args.device if 'TIMEOUT' not in NAMES: NAMES['TIMEOUT'] = None if not os.path.isfile(NAMES['TC']): print("The specified tc path " + NAMES['TC'] + " does not exist.") exit(1) pm.call_check_args(args, remaining) def get_id_list(alltests): """ Generate a list of all IDs in the test cases. """ return [x["id"] for x in alltests] def check_case_id(alltests): """ Check for duplicate test case IDs. """ idl = get_id_list(alltests) return [x for x in idl if idl.count(x) > 1] def does_id_exist(alltests, newid): """ Check if a given ID already exists in the list of test cases. """ idl = get_id_list(alltests) return (any(newid == x for x in idl)) def generate_case_ids(alltests): """ If a test case has a blank ID field, generate a random hex ID for it and then write the test cases back to disk. 
""" import random for c in alltests: if (c["id"] == ""): while True: newid = str('{:04x}'.format(random.randrange(16**4))) if (does_id_exist(alltests, newid)): continue else: c['id'] = newid break ufilename = [] for c in alltests: if ('filename' in c): ufilename.append(c['filename']) ufilename = get_unique_item(ufilename) for f in ufilename: testlist = [] for t in alltests: if 'filename' in t: if t['filename'] == f: del t['filename'] testlist.append(t) outfile = open(f, "w") json.dump(testlist, outfile, indent=4) outfile.write("\n") outfile.close() def filter_tests_by_id(args, testlist): ''' Remove tests from testlist that are not in the named id list. If id list is empty, return empty list. ''' newlist = list() if testlist and args.execute: target_ids = args.execute if isinstance(target_ids, list) and (len(target_ids) > 0): newlist = list(filter(lambda x: x['id'] in target_ids, testlist)) return newlist def filter_tests_by_category(args, testlist): ''' Remove tests from testlist that are not in a named category. ''' answer = list() if args.category and testlist: test_ids = list() for catg in set(args.category): if catg == '+c': continue print('considering category {}'.format(catg)) for tc in testlist: if catg in tc['category'] and tc['id'] not in test_ids: answer.append(tc) test_ids.append(tc['id']) return answer def get_test_cases(args): """ If a test case file is specified, retrieve tests from that file. Otherwise, glob for all json files in subdirectories and load from each one. Also, if requested, filter by category, and add tests matching certain ids. """ import fnmatch flist = [] testdirs = ['tc-tests'] if args.file: # at least one file was specified - remove the default directory testdirs = [] for ff in args.file: if not os.path.isfile(ff): print("IGNORING file " + ff + "\n\tBECAUSE does not exist.") else: flist.append(os.path.abspath(ff)) if args.directory: testdirs = args.directory for testdir in testdirs: for root, dirnames, filenames in os.walk(testdir): for filename in fnmatch.filter(filenames, '*.json'): candidate = os.path.abspath(os.path.join(root, filename)) if candidate not in testdirs: flist.append(candidate) alltestcases = list() for casefile in flist: alltestcases = alltestcases + (load_from_file(casefile)) allcatlist = get_test_categories(alltestcases) allidlist = get_id_list(alltestcases) testcases_by_cats = get_categorized_testlist(alltestcases, allcatlist) idtestcases = filter_tests_by_id(args, alltestcases) cattestcases = filter_tests_by_category(args, alltestcases) cat_ids = [x['id'] for x in cattestcases] if args.execute: if args.category: alltestcases = cattestcases + [x for x in idtestcases if x['id'] not in cat_ids] else: alltestcases = idtestcases else: if cat_ids: alltestcases = cattestcases else: # just accept the existing value of alltestcases, # which has been filtered by file/directory pass return allcatlist, allidlist, testcases_by_cats, alltestcases def set_operation_mode(pm, parser, args, remaining): """ Load the test case data and process remaining arguments to determine what the script should do for this run, and call the appropriate function. 
""" ucat, idlist, testcases, alltests = get_test_cases(args) if args.gen_id: if (has_blank_ids(idlist)): alltests = generate_case_ids(alltests) else: print("No empty ID fields found in test files.") exit(0) duplicate_ids = check_case_id(alltests) if (len(duplicate_ids) > 0): print("The following test case IDs are not unique:") print(str(set(duplicate_ids))) print("Please correct them before continuing.") exit(1) if args.showID: for atest in alltests: print_test_case(atest) exit(0) if isinstance(args.category, list) and (len(args.category) == 0): print("Available categories:") print_sll(ucat) exit(0) if args.list: list_test_cases(alltests) exit(0) exit_code = 0 # KSFT_PASS if len(alltests): req_plugins = pm.get_required_plugins(alltests) try: args = pm.load_required_plugins(req_plugins, parser, args, remaining) except PluginDependencyException as pde: print('The following plugins were not found:') print('{}'.format(pde.missing_pg)) catresults = test_runner(pm, args, alltests) if catresults.count_failures() != 0: exit_code = 1 # KSFT_FAIL if args.format == 'none': print('Test results output suppression requested\n') else: print('\nAll test results: \n') if args.format == 'xunit': suffix = 'xml' res = catresults.format_xunit() elif args.format == 'tap': suffix = 'tap' res = catresults.format_tap() print(res) print('\n\n') if not args.outfile: fname = 'test-results.{}'.format(suffix) else: fname = args.outfile with open(fname, 'w') as fh: fh.write(res) fh.close() if os.getenv('SUDO_UID') is not None: os.chown(fname, uid=int(os.getenv('SUDO_UID')), gid=int(os.getenv('SUDO_GID'))) else: print('No tests found\n') exit_code = 4 # KSFT_SKIP exit(exit_code) def main(): """ Start of execution; set up argument parser and get the arguments, and start operations. """ parser = args_parse() parser = set_args(parser) pm = PluginMgr(parser) parser = pm.call_add_args(parser) (args, remaining) = parser.parse_known_args() args.NAMES = NAMES pm.set_args(args) check_default_settings(args, remaining, pm) if args.verbose > 2: print('args is {}'.format(args)) set_operation_mode(pm, parser, args, remaining) if __name__ == "__main__": main()
Source: tools/testing/selftests/tc-testing/tdc.py (grace-kernel-grace-kernel-6.1.y)
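run_one_test() above reads its work out of a JSON test case with the keys id, name, category, setup, cmdUnderTest, expExitCode, verifyCmd, matchPattern, matchCount, and teardown. A minimal sketch of one such case, built and written from Python; the id, device, and commands are illustrative, not taken from the kernel tree:

# Hypothetical test case in the shape load_from_file()/run_one_test() expect.
import json

example_case = [{
    "id": "ab12",                       # four hex digits, cf. generate_case_ids()
    "name": "Add a pfifo qdisc to a dummy device",
    "category": ["qdisc"],
    "setup": ["$IP link add $DUMMY type dummy"],   # run by prepare_env('setup', ...)
    "cmdUnderTest": "$TC qdisc add dev $DUMMY root pfifo",
    "expExitCode": "0",
    "verifyCmd": "$TC qdisc show dev $DUMMY",
    "matchPattern": "qdisc pfifo",
    "matchCount": "1",
    "teardown": ["$IP link del $DUMMY"]
}]

with open('example.json', 'w') as f:
    json.dump(example_case, f, indent=4)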
""" # SPDX-License-Identifier: GPL-2.0 tdc_config.py - tdc user-specified values Copyright (C) 2017 Lucas Bates <[email protected]> """ # Dictionary containing all values that can be substituted in executable # commands. NAMES = { # Substitute your own tc path here 'TC': '/sbin/tc', # Substitute your own ip path here 'IP': '/sbin/ip', # Name of veth devices to be created for the namespace 'DEV0': 'v0p0', 'DEV1': 'v0p1', 'DEV2': '', 'DUMMY': 'dummy1', 'ETH': 'eth0', 'BATCH_FILE': './batch.txt', 'BATCH_DIR': 'tmp', # Length of time in seconds to wait before terminating a command 'TIMEOUT': 24, # Name of the namespace to use 'NS': 'tcut', # Directory containing eBPF test programs 'EBPFDIR': './' } ENVIR = { } # put customizations in tdc_config_local.py try: from tdc_config_local import * except ImportError as ie: pass try: NAMES.update(EXTRA_NAMES) except NameError as ne: pass
Source: tools/testing/selftests/tc-testing/tdc_config.py (grace-kernel-grace-kernel-6.1.y)
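tdc_config.py imports an optional tdc_config_local.py and merges EXTRA_NAMES into NAMES, so local overrides never require editing the tracked file. A minimal sketch of such an override file; the paths and values are illustrative:

# tdc_config_local.py -- picked up by the try/except import above.
# Merged via NAMES.update(EXTRA_NAMES) in tdc_config.py.
EXTRA_NAMES = {
    'TC': '/home/me/iproute2/tc/tc',   # test a locally built tc binary
    'TIMEOUT': 60,                     # allow slower commands to finish
}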
Source: tools/testing/selftests/tc-testing/plugins/__init__.py (grace-kernel-grace-kernel-6.1.y); the file is empty, serving only as the package marker that lets PluginMgr import modules from plugins/.
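PluginMgr imports every non-underscore .py file in this package and instantiates its module-level SubPlugin class; all the hooks it calls (pre_suite, post_suite, pre_case, post_case, pre_execute, post_execute, adjust_command, add_args, check_args) are inherited from TdcPlugin, so a plugin overrides only what it needs. A hypothetical do-nothing plugin, shown only to illustrate that contract:

# plugins/examplePlugin.py -- illustrative skeleton, not part of the kernel tree.
from TdcPlugin import TdcPlugin

class SubPlugin(TdcPlugin):
    def __init__(self):
        self.sub_class = 'example/SubPlugin'
        super().__init__()

    def pre_case(self, caseinfo, test_skip):
        # Matches the call PluginMgr.call_pre_case() makes on every plugin.
        super().pre_case(caseinfo, test_skip)
        print('about to run {}'.format(caseinfo['id']))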
'''
build ebpf program
'''

import os
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin

from tdc_config import *

class SubPlugin(TdcPlugin):
    def __init__(self):
        self.sub_class = 'buildebpf/SubPlugin'
        self.tap = ''
        super().__init__()

    def pre_suite(self, testcount, testidlist):
        super().pre_suite(testcount, testidlist)

        if self.args.buildebpf:
            self._ebpf_makeall()

    def post_suite(self, index):
        super().post_suite(index)

        self._ebpf_makeclean()

    def add_args(self, parser):
        super().add_args(parser)
        self.argparser_group = self.argparser.add_argument_group(
            'buildebpf',
            'options for buildebpfPlugin')
        self.argparser_group.add_argument(
            '--nobuildebpf', action='store_false', default=True,
            dest='buildebpf',
            help='Don\'t build eBPF programs')

        return self.argparser

    def _ebpf_makeall(self):
        if self.args.buildebpf:
            self._make('all')

    def _ebpf_makeclean(self):
        if self.args.buildebpf:
            self._make('clean')

    def _make(self, target):
        command = 'make -C {} {}'.format(self.args.NAMES['EBPFDIR'], target)
        proc = subprocess.Popen(command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=os.environ.copy())
        (rawout, serr) = proc.communicate()

        if proc.returncode != 0 and len(serr) > 0:
            foutput = serr.decode("utf-8")
        else:
            foutput = rawout.decode("utf-8")

        proc.stdout.close()
        proc.stderr.close()
        return proc, foutput
Source: tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py (grace-kernel-grace-kernel-6.1.y)
import os
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin

from tdc_config import *

class SubPlugin(TdcPlugin):
    def __init__(self):
        self.sub_class = 'ns/SubPlugin'
        super().__init__()

    def pre_suite(self, testcount, testidlist):
        '''run commands before test_runner goes into a test loop'''
        super().pre_suite(testcount, testidlist)

        if self.args.namespace:
            self._ns_create()
        else:
            self._ports_create()

    def post_suite(self, index):
        '''run commands after test_runner goes into a test loop'''
        super().post_suite(index)
        if self.args.verbose:
            print('{}.post_suite'.format(self.sub_class))

        if self.args.namespace:
            self._ns_destroy()
        else:
            self._ports_destroy()

    def add_args(self, parser):
        super().add_args(parser)
        self.argparser_group = self.argparser.add_argument_group(
            'netns',
            'options for nsPlugin (run commands in net namespace)')
        self.argparser_group.add_argument(
            '-N', '--no-namespace', action='store_false', default=True,
            dest='namespace', help='Don\'t run commands in namespace')
        return self.argparser

    def adjust_command(self, stage, command):
        super().adjust_command(stage, command)
        cmdform = 'list'
        cmdlist = list()

        if not self.args.namespace:
            return command

        if self.args.verbose:
            print('{}.adjust_command'.format(self.sub_class))

        if not isinstance(command, list):
            cmdform = 'str'
            cmdlist = command.split()
        else:
            cmdlist = command

        if stage == 'setup' or stage == 'execute' or stage == 'verify' or stage == 'teardown':
            if self.args.verbose:
                print('adjust_command:  stage is {}; inserting netns stuff in command [{}] list [{}]'.format(stage, command, cmdlist))
            cmdlist.insert(0, self.args.NAMES['NS'])
            cmdlist.insert(0, 'exec')
            cmdlist.insert(0, 'netns')
            cmdlist.insert(0, self.args.NAMES['IP'])
        else:
            pass

        if cmdform == 'str':
            command = ' '.join(cmdlist)
        else:
            command = cmdlist

        if self.args.verbose:
            print('adjust_command:  return command [{}]'.format(command))
        return command

    def _ports_create(self):
        cmd = '$IP link add $DEV0 type veth peer name $DEV1'
        self._exec_cmd('pre', cmd)
        cmd = '$IP link set $DEV0 up'
        self._exec_cmd('pre', cmd)
        if not self.args.namespace:
            cmd = '$IP link set $DEV1 up'
            self._exec_cmd('pre', cmd)

    def _ports_destroy(self):
        cmd = '$IP link del $DEV0'
        self._exec_cmd('post', cmd)

    def _ns_create(self):
        '''
        Create the network namespace in which the tests will be run and set up
        the required network devices for it.
        '''
        self._ports_create()
        if self.args.namespace:
            cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
            self._exec_cmd('pre', cmd)
            cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
            self._exec_cmd('pre', cmd)
            cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
            self._exec_cmd('pre', cmd)
            if self.args.device:
                cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
                self._exec_cmd('pre', cmd)
                cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
                self._exec_cmd('pre', cmd)

    def _ns_destroy(self):
        '''
        Destroy the network namespace for testing (and any associated network
        devices as well)
        '''
        if self.args.namespace:
            cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
            self._exec_cmd('post', cmd)

    def _exec_cmd(self, stage, command):
        '''
        Perform any required modifications on an executable command, then run
        it in a subprocess and return the results.
        '''
        if '$' in command:
            command = self._replace_keywords(command)

        # Keep the adjusted command; the original discarded adjust_command()'s
        # return value, so any rewrite it performed was silently dropped.
        command = self.adjust_command(stage, command)
        if self.args.verbose:
            print('_exec_cmd:  command "{}"'.format(command))
        proc = subprocess.Popen(command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=ENVIR)
        (rawout, serr) = proc.communicate()

        if proc.returncode != 0 and len(serr) > 0:
            foutput = serr.decode("utf-8")
        else:
            foutput = rawout.decode("utf-8")

        proc.stdout.close()
        proc.stderr.close()
        return proc, foutput

    def _replace_keywords(self, cmd):
        """
        For a given executable command, substitute any known
        variables contained within NAMES with the correct values
        """
        tcmd = Template(cmd)
        subcmd = tcmd.safe_substitute(self.args.NAMES)
        return subcmd
Source: tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py (grace-kernel-grace-kernel-6.1.y)
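The rewrite adjust_command() performs for the setup/execute/verify/teardown stages is a plain prefix insertion; the four insert(0, ...) calls amount to prepending "ip netns exec <ns>". A standalone sketch of the same transformation, with a hypothetical helper name and example values:

# Illustration only; mirrors nsPlugin.adjust_command()'s insert order.
def add_ns_prefix(command, ip='/sbin/ip', ns='tcut'):
    cmdlist = command.split()
    for tok in reversed([ip, 'netns', 'exec', ns]):
        cmdlist.insert(0, tok)
    return ' '.join(cmdlist)

print(add_ns_prefix('/sbin/tc qdisc show dev v0p1'))
# -> /sbin/ip netns exec tcut /sbin/tc qdisc show dev v0p1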
#!/usr/bin/env python3

import os
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin

from tdc_config import *

try:
    from scapy.all import *
except ImportError:
    print("Unable to import the scapy python module.")
    print("\nIf not already installed, you may do so with:")
    print("\t\tpip3 install scapy==2.4.2")
    exit(1)

class SubPlugin(TdcPlugin):
    def __init__(self):
        self.sub_class = 'scapy/SubPlugin'
        super().__init__()

    def post_execute(self):
        if 'scapy' not in self.args.caseinfo:
            if self.args.verbose:
                print('{}.post_execute: no scapy info in test case'.format(self.sub_class))
            return

        # Check for required fields
        lscapyinfo = self.args.caseinfo['scapy']
        if type(lscapyinfo) != list:
            lscapyinfo = [ lscapyinfo, ]

        for scapyinfo in lscapyinfo:
            scapy_keys = ['iface', 'count', 'packet']
            missing_keys = []
            keyfail = False
            for k in scapy_keys:
                if k not in scapyinfo:
                    keyfail = True
                    missing_keys.append(k)
            if keyfail:
                print('{}: Scapy block present in the test, but is missing info:'
                    .format(self.sub_class))
                print('{}'.format(missing_keys))
                # Skip the malformed block; the original fell through and
                # raised KeyError on the eval below.
                continue

            pkt = eval(scapyinfo['packet'])
            if '$' in scapyinfo['iface']:
                tpl = Template(scapyinfo['iface'])
                scapyinfo['iface'] = tpl.safe_substitute(NAMES)
            for count in range(scapyinfo['count']):
                sendp(pkt, iface=scapyinfo['iface'])
Source: tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py (grace-kernel-grace-kernel-6.1.y)
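post_execute() above looks for a 'scapy' block in the test case, substitutes NAMES into 'iface', eval()s 'packet' as a scapy expression, and sends it 'count' times. A sketch of what one such block might look like once loaded from the test case JSON; the packet and interface are illustrative:

# Hypothetical 'scapy' block, expressed as the dict post_execute() reads.
scapy_block = {
    "iface": "$DEV0",    # expanded via Template(...).safe_substitute(NAMES)
    "count": 1,          # number of sendp() calls
    "packet": "Ether()/IP(dst='10.0.0.1')/ICMP()",  # eval()'d against scapy.all
}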
import os
import sys
from TdcPlugin import TdcPlugin

from tdc_config import *


class SubPlugin(TdcPlugin):
    def __init__(self):
        self.sub_class = 'root/SubPlugin'
        super().__init__()

    def pre_suite(self, testcount, testidlist):
        # run commands before test_runner goes into a test loop
        super().pre_suite(testcount, testidlist)

        if os.geteuid():
            print('This script must be run with root privileges', file=sys.stderr)
            exit(1)
Source: tools/testing/selftests/tc-testing/plugin-lib/rootPlugin.py (grace-kernel-grace-kernel-6.1.y)
'''
run the command under test, under valgrind and collect memory leak info
as a separate test.
'''


import os
import re
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
from TdcResults import *

from tdc_config import *

def vp_extract_num_from_string(num_as_string_maybe_with_commas):
    return int(num_as_string_maybe_with_commas.replace(',', ''))

class SubPlugin(TdcPlugin):
    def __init__(self):
        self.sub_class = 'valgrind/SubPlugin'
        self.tap = ''
        self._tsr = TestSuiteReport()
        super().__init__()

    def pre_suite(self, testcount, testidlist):
        '''run commands before test_runner goes into a test loop'''
        super().pre_suite(testcount, testidlist)
        if self.args.verbose > 1:
            print('{}.pre_suite'.format(self.sub_class))
        if self.args.valgrind:
            self._add_to_tap('1..{}\n'.format(self.testcount))

    def post_suite(self, index):
        '''run commands after test_runner goes into a test loop'''
        super().post_suite(index)
        if self.args.verbose > 1:
            print('{}.post_suite'.format(self.sub_class))
        #print('{}'.format(self.tap))
        for xx in range(index - 1, self.testcount):
            res = TestResult('{}-mem'.format(self.testidlist[xx]), 'Test skipped')
            res.set_result(ResultState.skip)
            res.set_errormsg('Skipped because of prior setup/teardown failure')
            self._add_results(res)
        if self.args.verbose < 4:
            subprocess.check_output('rm -f vgnd-*.log', shell=True)

    def add_args(self, parser):
        super().add_args(parser)
        self.argparser_group = self.argparser.add_argument_group(
            'valgrind',
            'options for valgrindPlugin (run command under test under Valgrind)')

        self.argparser_group.add_argument(
            '-V', '--valgrind', action='store_true',
            help='Run commands under valgrind')

        return self.argparser

    def adjust_command(self, stage, command):
        super().adjust_command(stage, command)
        cmdform = 'list'
        cmdlist = list()

        if not self.args.valgrind:
            return command

        if self.args.verbose > 1:
            print('{}.adjust_command'.format(self.sub_class))

        if not isinstance(command, list):
            cmdform = 'str'
            cmdlist = command.split()
        else:
            cmdlist = command

        if stage == 'execute':
            if self.args.verbose > 1:
                print('adjust_command:  stage is {}; inserting valgrind stuff in command [{}] list [{}]'.
                      format(stage, command, cmdlist))
            cmdlist.insert(0, '--track-origins=yes')
            cmdlist.insert(0, '--show-leak-kinds=definite,indirect')
            cmdlist.insert(0, '--leak-check=full')
            cmdlist.insert(0, '--log-file=vgnd-{}.log'.format(self.args.testid))
            cmdlist.insert(0, '-v')  # ask for summary of non-leak errors
            cmdlist.insert(0, ENVIR['VALGRIND_BIN'])
        else:
            pass

        if cmdform == 'str':
            command = ' '.join(cmdlist)
        else:
            command = cmdlist

        if self.args.verbose > 1:
            print('adjust_command:  return command [{}]'.format(command))
        return command

    def post_execute(self):
        if not self.args.valgrind:
            return

        res = TestResult('{}-mem'.format(self.args.testid),
              '{} memory leak check'.format(self.args.test_name))
        if self.args.test_skip:
            res.set_result(ResultState.skip)
            res.set_errormsg('Test case designated as skipped.')
            self._add_results(res)
            return

        self.definitely_lost_re = re.compile(
            r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks', re.MULTILINE | re.DOTALL)
        self.indirectly_lost_re = re.compile(
            r'indirectly lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\s+blocks', re.MULTILINE | re.DOTALL)
        # The original pattern lacked '\s+' between the byte count and "bytes",
        # so "possibly lost" lines were never matched.
        self.possibly_lost_re = re.compile(
            r'possibly lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\s+blocks', re.MULTILINE | re.DOTALL)
        self.non_leak_error_re = re.compile(
            r'ERROR SUMMARY:\s+([,0-9]+) errors from\s+([,0-9]+)\s+contexts', re.MULTILINE | re.DOTALL)

        def_num = 0
        ind_num = 0
        pos_num = 0
        nle_num = 0

        # what about concurrent test runs? Maybe force them to be in different directories?
        with open('vgnd-{}.log'.format(self.args.testid)) as vfd:
            content = vfd.read()
            def_mo = self.definitely_lost_re.search(content)
            ind_mo = self.indirectly_lost_re.search(content)
            pos_mo = self.possibly_lost_re.search(content)
            nle_mo = self.non_leak_error_re.search(content)

            if def_mo:
                def_num = int(def_mo.group(2))
            if ind_mo:
                ind_num = int(ind_mo.group(2))
            if pos_mo:
                pos_num = int(pos_mo.group(2))
            if nle_mo:
                nle_num = int(nle_mo.group(1))

        mem_results = ''
        if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
            mem_results += 'not '
            res.set_result(ResultState.fail)
            res.set_failmsg('Memory leak detected')
            res.append_failmsg(content)
        else:
            res.set_result(ResultState.success)

        self._add_results(res)

    def _add_results(self, res):
        self._tsr.add_resultdata(res)

    def _add_to_tap(self, more_tap_output):
        self.tap += more_tap_output
Source: tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py (grace-kernel-grace-kernel-6.1.y)
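The leak counters come from scraping the valgrind log with the regexes above; a quick standalone check of the "definitely lost" pattern against a fabricated log excerpt (the log line is illustrative, in valgrind's usual summary format):

import re

definitely_lost_re = re.compile(
    r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks',
    re.MULTILINE | re.DOTALL)

sample = '==1234==    definitely lost: 1,024 bytes in 3 blocks'
mo = definitely_lost_re.search(sample)
if mo:
    # group(2) is the block count, as used in post_execute() above;
    # stripping commas mirrors vp_extract_num_from_string().
    print(int(mo.group(2).replace(',', '')))   # -> 3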
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) import hashlib import os import socket import struct import sys import unittest import fcntl import select TPM2_ST_NO_SESSIONS = 0x8001 TPM2_ST_SESSIONS = 0x8002 TPM2_CC_FIRST = 0x01FF TPM2_CC_CREATE_PRIMARY = 0x0131 TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET = 0x0139 TPM2_CC_CREATE = 0x0153 TPM2_CC_LOAD = 0x0157 TPM2_CC_UNSEAL = 0x015E TPM2_CC_FLUSH_CONTEXT = 0x0165 TPM2_CC_START_AUTH_SESSION = 0x0176 TPM2_CC_GET_CAPABILITY = 0x017A TPM2_CC_GET_RANDOM = 0x017B TPM2_CC_PCR_READ = 0x017E TPM2_CC_POLICY_PCR = 0x017F TPM2_CC_PCR_EXTEND = 0x0182 TPM2_CC_POLICY_PASSWORD = 0x018C TPM2_CC_POLICY_GET_DIGEST = 0x0189 TPM2_SE_POLICY = 0x01 TPM2_SE_TRIAL = 0x03 TPM2_ALG_RSA = 0x0001 TPM2_ALG_SHA1 = 0x0004 TPM2_ALG_AES = 0x0006 TPM2_ALG_KEYEDHASH = 0x0008 TPM2_ALG_SHA256 = 0x000B TPM2_ALG_NULL = 0x0010 TPM2_ALG_CBC = 0x0042 TPM2_ALG_CFB = 0x0043 TPM2_RH_OWNER = 0x40000001 TPM2_RH_NULL = 0x40000007 TPM2_RH_LOCKOUT = 0x4000000A TPM2_RS_PW = 0x40000009 TPM2_RC_SIZE = 0x01D5 TPM2_RC_AUTH_FAIL = 0x098E TPM2_RC_POLICY_FAIL = 0x099D TPM2_RC_COMMAND_CODE = 0x0143 TSS2_RC_LAYER_SHIFT = 16 TSS2_RESMGR_TPM_RC_LAYER = (11 << TSS2_RC_LAYER_SHIFT) TPM2_CAP_HANDLES = 0x00000001 TPM2_CAP_COMMANDS = 0x00000002 TPM2_CAP_PCRS = 0x00000005 TPM2_CAP_TPM_PROPERTIES = 0x00000006 TPM2_PT_FIXED = 0x100 TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41 HR_SHIFT = 24 HR_LOADED_SESSION = 0x02000000 HR_TRANSIENT = 0x80000000 SHA1_DIGEST_SIZE = 20 SHA256_DIGEST_SIZE = 32 TPM2_VER0_ERRORS = { 0x000: "TPM_RC_SUCCESS", 0x030: "TPM_RC_BAD_TAG", } TPM2_VER1_ERRORS = { 0x000: "TPM_RC_FAILURE", 0x001: "TPM_RC_FAILURE", 0x003: "TPM_RC_SEQUENCE", 0x00B: "TPM_RC_PRIVATE", 0x019: "TPM_RC_HMAC", 0x020: "TPM_RC_DISABLED", 0x021: "TPM_RC_EXCLUSIVE", 0x024: "TPM_RC_AUTH_TYPE", 0x025: "TPM_RC_AUTH_MISSING", 0x026: "TPM_RC_POLICY", 0x027: "TPM_RC_PCR", 0x028: "TPM_RC_PCR_CHANGED", 0x02D: "TPM_RC_UPGRADE", 0x02E: "TPM_RC_TOO_MANY_CONTEXTS", 0x02F: "TPM_RC_AUTH_UNAVAILABLE", 0x030: "TPM_RC_REBOOT", 0x031: "TPM_RC_UNBALANCED", 0x042: "TPM_RC_COMMAND_SIZE", 0x043: "TPM_RC_COMMAND_CODE", 0x044: "TPM_RC_AUTHSIZE", 0x045: "TPM_RC_AUTH_CONTEXT", 0x046: "TPM_RC_NV_RANGE", 0x047: "TPM_RC_NV_SIZE", 0x048: "TPM_RC_NV_LOCKED", 0x049: "TPM_RC_NV_AUTHORIZATION", 0x04A: "TPM_RC_NV_UNINITIALIZED", 0x04B: "TPM_RC_NV_SPACE", 0x04C: "TPM_RC_NV_DEFINED", 0x050: "TPM_RC_BAD_CONTEXT", 0x051: "TPM_RC_CPHASH", 0x052: "TPM_RC_PARENT", 0x053: "TPM_RC_NEEDS_TEST", 0x054: "TPM_RC_NO_RESULT", 0x055: "TPM_RC_SENSITIVE", 0x07F: "RC_MAX_FM0", } TPM2_FMT1_ERRORS = { 0x001: "TPM_RC_ASYMMETRIC", 0x002: "TPM_RC_ATTRIBUTES", 0x003: "TPM_RC_HASH", 0x004: "TPM_RC_VALUE", 0x005: "TPM_RC_HIERARCHY", 0x007: "TPM_RC_KEY_SIZE", 0x008: "TPM_RC_MGF", 0x009: "TPM_RC_MODE", 0x00A: "TPM_RC_TYPE", 0x00B: "TPM_RC_HANDLE", 0x00C: "TPM_RC_KDF", 0x00D: "TPM_RC_RANGE", 0x00E: "TPM_RC_AUTH_FAIL", 0x00F: "TPM_RC_NONCE", 0x010: "TPM_RC_PP", 0x012: "TPM_RC_SCHEME", 0x015: "TPM_RC_SIZE", 0x016: "TPM_RC_SYMMETRIC", 0x017: "TPM_RC_TAG", 0x018: "TPM_RC_SELECTOR", 0x01A: "TPM_RC_INSUFFICIENT", 0x01B: "TPM_RC_SIGNATURE", 0x01C: "TPM_RC_KEY", 0x01D: "TPM_RC_POLICY_FAIL", 0x01F: "TPM_RC_INTEGRITY", 0x020: "TPM_RC_TICKET", 0x021: "TPM_RC_RESERVED_BITS", 0x022: "TPM_RC_BAD_AUTH", 0x023: "TPM_RC_EXPIRED", 0x024: "TPM_RC_POLICY_CC", 0x025: "TPM_RC_BINDING", 0x026: "TPM_RC_CURVE", 0x027: "TPM_RC_ECC_POINT", } TPM2_WARN_ERRORS = { 0x001: "TPM_RC_CONTEXT_GAP", 0x002: "TPM_RC_OBJECT_MEMORY", 0x003: "TPM_RC_SESSION_MEMORY", 0x004: "TPM_RC_MEMORY", 0x005: "TPM_RC_SESSION_HANDLES", 
0x006: "TPM_RC_OBJECT_HANDLES", 0x007: "TPM_RC_LOCALITY", 0x008: "TPM_RC_YIELDED", 0x009: "TPM_RC_CANCELED", 0x00A: "TPM_RC_TESTING", 0x010: "TPM_RC_REFERENCE_H0", 0x011: "TPM_RC_REFERENCE_H1", 0x012: "TPM_RC_REFERENCE_H2", 0x013: "TPM_RC_REFERENCE_H3", 0x014: "TPM_RC_REFERENCE_H4", 0x015: "TPM_RC_REFERENCE_H5", 0x016: "TPM_RC_REFERENCE_H6", 0x018: "TPM_RC_REFERENCE_S0", 0x019: "TPM_RC_REFERENCE_S1", 0x01A: "TPM_RC_REFERENCE_S2", 0x01B: "TPM_RC_REFERENCE_S3", 0x01C: "TPM_RC_REFERENCE_S4", 0x01D: "TPM_RC_REFERENCE_S5", 0x01E: "TPM_RC_REFERENCE_S6", 0x020: "TPM_RC_NV_RATE", 0x021: "TPM_RC_LOCKOUT", 0x022: "TPM_RC_RETRY", 0x023: "TPM_RC_NV_UNAVAILABLE", 0x7F: "TPM_RC_NOT_USED", } RC_VER1 = 0x100 RC_FMT1 = 0x080 RC_WARN = 0x900 ALG_DIGEST_SIZE_MAP = { TPM2_ALG_SHA1: SHA1_DIGEST_SIZE, TPM2_ALG_SHA256: SHA256_DIGEST_SIZE, } ALG_HASH_FUNCTION_MAP = { TPM2_ALG_SHA1: hashlib.sha1, TPM2_ALG_SHA256: hashlib.sha256 } NAME_ALG_MAP = { "sha1": TPM2_ALG_SHA1, "sha256": TPM2_ALG_SHA256, } class UnknownAlgorithmIdError(Exception): def __init__(self, alg): self.alg = alg def __str__(self): return '0x%0x' % (alg) class UnknownAlgorithmNameError(Exception): def __init__(self, name): self.name = name def __str__(self): return name class UnknownPCRBankError(Exception): def __init__(self, alg): self.alg = alg def __str__(self): return '0x%0x' % (alg) class ProtocolError(Exception): def __init__(self, cc, rc): self.cc = cc self.rc = rc if (rc & RC_FMT1) == RC_FMT1: self.name = TPM2_FMT1_ERRORS.get(rc & 0x3f, "TPM_RC_UNKNOWN") elif (rc & RC_WARN) == RC_WARN: self.name = TPM2_WARN_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN") elif (rc & RC_VER1) == RC_VER1: self.name = TPM2_VER1_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN") else: self.name = TPM2_VER0_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN") def __str__(self): if self.cc: return '%s: cc=0x%08x, rc=0x%08x' % (self.name, self.cc, self.rc) else: return '%s: rc=0x%08x' % (self.name, self.rc) class AuthCommand(object): """TPMS_AUTH_COMMAND""" def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(), session_attributes=0, hmac=bytes()): self.session_handle = session_handle self.nonce = nonce self.session_attributes = session_attributes self.hmac = hmac def __bytes__(self): fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac)) return struct.pack(fmt, self.session_handle, len(self.nonce), self.nonce, self.session_attributes, len(self.hmac), self.hmac) def __len__(self): fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac)) return struct.calcsize(fmt) class SensitiveCreate(object): """TPMS_SENSITIVE_CREATE""" def __init__(self, user_auth=bytes(), data=bytes()): self.user_auth = user_auth self.data = data def __bytes__(self): fmt = '>H%us H%us' % (len(self.user_auth), len(self.data)) return struct.pack(fmt, len(self.user_auth), self.user_auth, len(self.data), self.data) def __len__(self): fmt = '>H%us H%us' % (len(self.user_auth), len(self.data)) return struct.calcsize(fmt) class Public(object): """TPMT_PUBLIC""" FIXED_TPM = (1 << 1) FIXED_PARENT = (1 << 4) SENSITIVE_DATA_ORIGIN = (1 << 5) USER_WITH_AUTH = (1 << 6) RESTRICTED = (1 << 16) DECRYPT = (1 << 17) def __fmt(self): return '>HHIH%us%usH%us' % \ (len(self.auth_policy), len(self.parameters), len(self.unique)) def __init__(self, object_type, name_alg, object_attributes, auth_policy=bytes(), parameters=bytes(), unique=bytes()): self.object_type = object_type self.name_alg = name_alg self.object_attributes = object_attributes self.auth_policy = auth_policy self.parameters = parameters self.unique = unique def 
__bytes__(self): return struct.pack(self.__fmt(), self.object_type, self.name_alg, self.object_attributes, len(self.auth_policy), self.auth_policy, self.parameters, len(self.unique), self.unique) def __len__(self): return struct.calcsize(self.__fmt()) def get_digest_size(alg): ds = ALG_DIGEST_SIZE_MAP.get(alg) if not ds: raise UnknownAlgorithmIdError(alg) return ds def get_hash_function(alg): f = ALG_HASH_FUNCTION_MAP.get(alg) if not f: raise UnknownAlgorithmIdError(alg) return f def get_algorithm(name): alg = NAME_ALG_MAP.get(name) if not alg: raise UnknownAlgorithmNameError(name) return alg def hex_dump(d): d = [format(ord(x), '02x') for x in d] d = [d[i: i + 16] for i in range(0, len(d), 16)] d = [' '.join(x) for x in d] d = os.linesep.join(d) return d class Client: FLAG_DEBUG = 0x01 FLAG_SPACE = 0x02 FLAG_NONBLOCK = 0x04 TPM_IOC_NEW_SPACE = 0xa200 def __init__(self, flags = 0): self.flags = flags if (self.flags & Client.FLAG_SPACE) == 0: self.tpm = open('/dev/tpm0', 'r+b', buffering=0) else: self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0) if (self.flags & Client.FLAG_NONBLOCK): flags = fcntl.fcntl(self.tpm, fcntl.F_GETFL) flags |= os.O_NONBLOCK fcntl.fcntl(self.tpm, fcntl.F_SETFL, flags) self.tpm_poll = select.poll() def __del__(self): if self.tpm: self.tpm.close() def close(self): self.tpm.close() def send_cmd(self, cmd): self.tpm.write(cmd) if (self.flags & Client.FLAG_NONBLOCK): self.tpm_poll.register(self.tpm, select.POLLIN) self.tpm_poll.poll(10000) rsp = self.tpm.read() if (self.flags & Client.FLAG_NONBLOCK): self.tpm_poll.unregister(self.tpm) if (self.flags & Client.FLAG_DEBUG) != 0: sys.stderr.write('cmd' + os.linesep) sys.stderr.write(hex_dump(cmd) + os.linesep) sys.stderr.write('rsp' + os.linesep) sys.stderr.write(hex_dump(rsp) + os.linesep) rc = struct.unpack('>I', rsp[6:10])[0] if rc != 0: cc = struct.unpack('>I', cmd[6:10])[0] raise ProtocolError(cc, rc) return rsp def read_pcr(self, i, bank_alg = TPM2_ALG_SHA1): pcrsel_len = max((i >> 3) + 1, 3) pcrsel = [0] * pcrsel_len pcrsel[i >> 3] = 1 << (i & 7) pcrsel = ''.join(map(chr, pcrsel)).encode() fmt = '>HII IHB%us' % (pcrsel_len) cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_PCR_READ, 1, bank_alg, pcrsel_len, pcrsel) rsp = self.send_cmd(cmd) pcr_update_cnt, pcr_select_cnt = struct.unpack('>II', rsp[10:18]) assert pcr_select_cnt == 1 rsp = rsp[18:] alg2, pcrsel_len2 = struct.unpack('>HB', rsp[:3]) assert bank_alg == alg2 and pcrsel_len == pcrsel_len2 rsp = rsp[3 + pcrsel_len:] digest_cnt = struct.unpack('>I', rsp[:4])[0] if digest_cnt == 0: return None rsp = rsp[6:] return rsp def extend_pcr(self, i, dig, bank_alg = TPM2_ALG_SHA1): ds = get_digest_size(bank_alg) assert(ds == len(dig)) auth_cmd = AuthCommand() fmt = '>HII I I%us IH%us' % (len(auth_cmd), ds) cmd = struct.pack( fmt, TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_PCR_EXTEND, i, len(auth_cmd), bytes(auth_cmd), 1, bank_alg, dig) self.send_cmd(cmd) def start_auth_session(self, session_type, name_alg = TPM2_ALG_SHA1): fmt = '>HII IIH16sHBHH' cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_START_AUTH_SESSION, TPM2_RH_NULL, TPM2_RH_NULL, 16, ('\0' * 16).encode(), 0, session_type, TPM2_ALG_NULL, name_alg) return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0] def __calc_pcr_digest(self, pcrs, bank_alg = TPM2_ALG_SHA1, digest_alg = TPM2_ALG_SHA1): x = [] f = get_hash_function(digest_alg) for i in pcrs: pcr = self.read_pcr(i, bank_alg) if pcr is None: return None x += pcr return f(bytearray(x)).digest() def 
policy_pcr(self, handle, pcrs, bank_alg = TPM2_ALG_SHA1, name_alg = TPM2_ALG_SHA1): ds = get_digest_size(name_alg) dig = self.__calc_pcr_digest(pcrs, bank_alg, name_alg) if not dig: raise UnknownPCRBankError(bank_alg) pcrsel_len = max((max(pcrs) >> 3) + 1, 3) pcrsel = [0] * pcrsel_len for i in pcrs: pcrsel[i >> 3] |= 1 << (i & 7) pcrsel = ''.join(map(chr, pcrsel)).encode() fmt = '>HII IH%usIHB3s' % ds cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_POLICY_PCR, handle, len(dig), bytes(dig), 1, bank_alg, pcrsel_len, pcrsel) self.send_cmd(cmd) def policy_password(self, handle): fmt = '>HII I' cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_POLICY_PASSWORD, handle) self.send_cmd(cmd) def get_policy_digest(self, handle): fmt = '>HII I' cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_POLICY_GET_DIGEST, handle) return self.send_cmd(cmd)[12:] def flush_context(self, handle): fmt = '>HIII' cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_FLUSH_CONTEXT, handle) self.send_cmd(cmd) def create_root_key(self, auth_value = bytes()): attributes = \ Public.FIXED_TPM | \ Public.FIXED_PARENT | \ Public.SENSITIVE_DATA_ORIGIN | \ Public.USER_WITH_AUTH | \ Public.RESTRICTED | \ Public.DECRYPT auth_cmd = AuthCommand() sensitive = SensitiveCreate(user_auth=auth_value) public_parms = struct.pack( '>HHHHHI', TPM2_ALG_AES, 128, TPM2_ALG_CFB, TPM2_ALG_NULL, 2048, 0) public = Public( object_type=TPM2_ALG_RSA, name_alg=TPM2_ALG_SHA1, object_attributes=attributes, parameters=public_parms) fmt = '>HIII I%us H%us H%us HI' % \ (len(auth_cmd), len(sensitive), len(public)) cmd = struct.pack( fmt, TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_CREATE_PRIMARY, TPM2_RH_OWNER, len(auth_cmd), bytes(auth_cmd), len(sensitive), bytes(sensitive), len(public), bytes(public), 0, 0) return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0] def seal(self, parent_key, data, auth_value, policy_dig, name_alg = TPM2_ALG_SHA1): ds = get_digest_size(name_alg) assert(not policy_dig or ds == len(policy_dig)) attributes = 0 if not policy_dig: attributes |= Public.USER_WITH_AUTH policy_dig = bytes() auth_cmd = AuthCommand() sensitive = SensitiveCreate(user_auth=auth_value, data=data) public = Public( object_type=TPM2_ALG_KEYEDHASH, name_alg=name_alg, object_attributes=attributes, auth_policy=policy_dig, parameters=struct.pack('>H', TPM2_ALG_NULL)) fmt = '>HIII I%us H%us H%us HI' % \ (len(auth_cmd), len(sensitive), len(public)) cmd = struct.pack( fmt, TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_CREATE, parent_key, len(auth_cmd), bytes(auth_cmd), len(sensitive), bytes(sensitive), len(public), bytes(public), 0, 0) rsp = self.send_cmd(cmd) return rsp[14:] def unseal(self, parent_key, blob, auth_value, policy_handle): private_len = struct.unpack('>H', blob[0:2])[0] public_start = private_len + 2 public_len = struct.unpack('>H', blob[public_start:public_start + 2])[0] blob = blob[:private_len + public_len + 4] auth_cmd = AuthCommand() fmt = '>HII I I%us %us' % (len(auth_cmd), len(blob)) cmd = struct.pack( fmt, TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_LOAD, parent_key, len(auth_cmd), bytes(auth_cmd), blob) data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0] if policy_handle: auth_cmd = AuthCommand(session_handle=policy_handle, hmac=auth_value) else: auth_cmd = AuthCommand(hmac=auth_value) fmt = '>HII I I%us' % (len(auth_cmd)) cmd = struct.pack( fmt, TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_UNSEAL, data_handle, 
len(auth_cmd), bytes(auth_cmd)) try: rsp = self.send_cmd(cmd) finally: self.flush_context(data_handle) data_len = struct.unpack('>I', rsp[10:14])[0] - 2 return rsp[16:16 + data_len] def reset_da_lock(self): auth_cmd = AuthCommand() fmt = '>HII I I%us' % (len(auth_cmd)) cmd = struct.pack( fmt, TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET, TPM2_RH_LOCKOUT, len(auth_cmd), bytes(auth_cmd)) self.send_cmd(cmd) def __get_cap_cnt(self, cap, pt, cnt): handles = [] fmt = '>HII III' cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_GET_CAPABILITY, cap, pt, cnt) rsp = self.send_cmd(cmd)[10:] more_data, cap, cnt = struct.unpack('>BII', rsp[:9]) rsp = rsp[9:] for i in range(0, cnt): handle = struct.unpack('>I', rsp[:4])[0] handles.append(handle) rsp = rsp[4:] return handles, more_data def get_cap(self, cap, pt): handles = [] more_data = True while more_data: next_handles, more_data = self.__get_cap_cnt(cap, pt, 1) handles += next_handles pt += 1 return handles def get_cap_pcrs(self): pcr_banks = {} fmt = '>HII III' cmd = struct.pack(fmt, TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_GET_CAPABILITY, TPM2_CAP_PCRS, 0, 1) rsp = self.send_cmd(cmd)[10:] _, _, cnt = struct.unpack('>BII', rsp[:9]) rsp = rsp[9:] # items are TPMS_PCR_SELECTION's for i in range(0, cnt): hash, sizeOfSelect = struct.unpack('>HB', rsp[:3]) rsp = rsp[3:] pcrSelect = 0 if sizeOfSelect > 0: pcrSelect, = struct.unpack('%ds' % sizeOfSelect, rsp[:sizeOfSelect]) rsp = rsp[sizeOfSelect:] pcrSelect = int.from_bytes(pcrSelect, byteorder='big') pcr_banks[hash] = pcrSelect return pcr_banks
Source: tools/testing/selftests/tpm2/tpm2.py (grace-kernel-grace-kernel-6.1.y)
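A short sketch of driving this Client directly, assuming a TPM character device (/dev/tpmrm0 with FLAG_SPACE, /dev/tpm0 otherwise) and permission to open it; this mirrors what SmokeTest.test_seal_with_auth in the unit tests below does:

import tpm2

client = tpm2.Client(tpm2.Client.FLAG_SPACE)    # use the resource-managed device
root_key = client.create_root_key()

data = b'secret'
auth = b'password'
blob = client.seal(root_key, data, auth, None)  # None: no policy digest
assert client.unseal(root_key, blob, auth, None) == data

client.flush_context(root_key)
client.close()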
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) from argparse import ArgumentParser from argparse import FileType import os import sys import tpm2 from tpm2 import ProtocolError import unittest import logging import struct class SmokeTest(unittest.TestCase): def setUp(self): self.client = tpm2.Client() self.root_key = self.client.create_root_key() def tearDown(self): self.client.flush_context(self.root_key) self.client.close() def test_seal_with_auth(self): data = ('X' * 64).encode() auth = ('A' * 15).encode() blob = self.client.seal(self.root_key, data, auth, None) result = self.client.unseal(self.root_key, blob, auth, None) self.assertEqual(data, result) def determine_bank_alg(self, mask): pcr_banks = self.client.get_cap_pcrs() for bank_alg, pcrSelection in pcr_banks.items(): if pcrSelection & mask == mask: return bank_alg return None def test_seal_with_policy(self): bank_alg = self.determine_bank_alg(1 << 16) self.assertIsNotNone(bank_alg) handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL) data = ('X' * 64).encode() auth = ('A' * 15).encode() pcrs = [16] try: self.client.policy_pcr(handle, pcrs, bank_alg=bank_alg) self.client.policy_password(handle) policy_dig = self.client.get_policy_digest(handle) finally: self.client.flush_context(handle) blob = self.client.seal(self.root_key, data, auth, policy_dig) handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY) try: self.client.policy_pcr(handle, pcrs, bank_alg=bank_alg) self.client.policy_password(handle) result = self.client.unseal(self.root_key, blob, auth, handle) except: self.client.flush_context(handle) raise self.assertEqual(data, result) def test_unseal_with_wrong_auth(self): data = ('X' * 64).encode() auth = ('A' * 20).encode() rc = 0 blob = self.client.seal(self.root_key, data, auth, None) try: result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B'.encode(), None) except ProtocolError as e: rc = e.rc self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL) def test_unseal_with_wrong_policy(self): bank_alg = self.determine_bank_alg(1 << 16 | 1 << 1) self.assertIsNotNone(bank_alg) handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL) data = ('X' * 64).encode() auth = ('A' * 17).encode() pcrs = [16] try: self.client.policy_pcr(handle, pcrs, bank_alg=bank_alg) self.client.policy_password(handle) policy_dig = self.client.get_policy_digest(handle) finally: self.client.flush_context(handle) blob = self.client.seal(self.root_key, data, auth, policy_dig) # Extend first a PCR that is not part of the policy and try to unseal. # This should succeed. ds = tpm2.get_digest_size(bank_alg) self.client.extend_pcr(1, ('X' * ds).encode(), bank_alg=bank_alg) handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY) try: self.client.policy_pcr(handle, pcrs, bank_alg=bank_alg) self.client.policy_password(handle) result = self.client.unseal(self.root_key, blob, auth, handle) except: self.client.flush_context(handle) raise self.assertEqual(data, result) # Then, extend a PCR that is part of the policy and try to unseal. # This should fail. 
self.client.extend_pcr(16, ('X' * ds).encode(), bank_alg=bank_alg) handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY) rc = 0 try: self.client.policy_pcr(handle, pcrs, bank_alg=bank_alg) self.client.policy_password(handle) result = self.client.unseal(self.root_key, blob, auth, handle) except ProtocolError as e: rc = e.rc self.client.flush_context(handle) except: self.client.flush_context(handle) raise self.assertEqual(rc, tpm2.TPM2_RC_POLICY_FAIL) def test_seal_with_too_long_auth(self): ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1) data = ('X' * 64).encode() auth = ('A' * (ds + 1)).encode() rc = 0 try: blob = self.client.seal(self.root_key, data, auth, None) except ProtocolError as e: rc = e.rc self.assertEqual(rc, tpm2.TPM2_RC_SIZE) def test_too_short_cmd(self): rejected = False try: fmt = '>HIII' cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt) + 1, tpm2.TPM2_CC_FLUSH_CONTEXT, 0xDEADBEEF) self.client.send_cmd(cmd) except IOError as e: rejected = True except: pass self.assertEqual(rejected, True) def test_read_partial_resp(self): try: fmt = '>HIIH' cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), tpm2.TPM2_CC_GET_RANDOM, 0x20) self.client.tpm.write(cmd) hdr = self.client.tpm.read(10) sz = struct.unpack('>I', hdr[2:6])[0] rsp = self.client.tpm.read() except: pass self.assertEqual(sz, 10 + 2 + 32) self.assertEqual(len(rsp), 2 + 32) def test_read_partial_overwrite(self): try: fmt = '>HIIH' cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), tpm2.TPM2_CC_GET_RANDOM, 0x20) self.client.tpm.write(cmd) # Read part of the respone rsp1 = self.client.tpm.read(15) # Send a new cmd self.client.tpm.write(cmd) # Read the whole respone rsp2 = self.client.tpm.read() except: pass self.assertEqual(len(rsp1), 15) self.assertEqual(len(rsp2), 10 + 2 + 32) def test_send_two_cmds(self): rejected = False try: fmt = '>HIIH' cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), tpm2.TPM2_CC_GET_RANDOM, 0x20) self.client.tpm.write(cmd) # expect the second one to raise -EBUSY error self.client.tpm.write(cmd) rsp = self.client.tpm.read() except IOError as e: # read the response rsp = self.client.tpm.read() rejected = True pass except: pass self.assertEqual(rejected, True) class SpaceTest(unittest.TestCase): def setUp(self): logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG) def test_make_two_spaces(self): log = logging.getLogger(__name__) log.debug("test_make_two_spaces") space1 = tpm2.Client(tpm2.Client.FLAG_SPACE) root1 = space1.create_root_key() space2 = tpm2.Client(tpm2.Client.FLAG_SPACE) root2 = space2.create_root_key() root3 = space2.create_root_key() log.debug("%08x" % (root1)) log.debug("%08x" % (root2)) log.debug("%08x" % (root3)) def test_flush_context(self): log = logging.getLogger(__name__) log.debug("test_flush_context") space1 = tpm2.Client(tpm2.Client.FLAG_SPACE) root1 = space1.create_root_key() log.debug("%08x" % (root1)) space1.flush_context(root1) def test_get_handles(self): log = logging.getLogger(__name__) log.debug("test_get_handles") space1 = tpm2.Client(tpm2.Client.FLAG_SPACE) space1.create_root_key() space2 = tpm2.Client(tpm2.Client.FLAG_SPACE) space2.create_root_key() space2.create_root_key() handles = space2.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_TRANSIENT) self.assertEqual(len(handles), 2) log.debug("%08x" % (handles[0])) log.debug("%08x" % (handles[1])) def test_invalid_cc(self): log = logging.getLogger(__name__) log.debug(sys._getframe().f_code.co_name) TPM2_CC_INVALID = 
tpm2.TPM2_CC_FIRST - 1 space1 = tpm2.Client(tpm2.Client.FLAG_SPACE) root1 = space1.create_root_key() log.debug("%08x" % (root1)) fmt = '>HII' cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_INVALID) rc = 0 try: space1.send_cmd(cmd) except ProtocolError as e: rc = e.rc self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE | tpm2.TSS2_RESMGR_TPM_RC_LAYER) class AsyncTest(unittest.TestCase): def setUp(self): logging.basicConfig(filename='AsyncTest.log', level=logging.DEBUG) def test_async(self): log = logging.getLogger(__name__) log.debug(sys._getframe().f_code.co_name) async_client = tpm2.Client(tpm2.Client.FLAG_NONBLOCK) log.debug("Calling get_cap in a NON_BLOCKING mode") async_client.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_LOADED_SESSION) async_client.close() def test_flush_invalid_context(self): log = logging.getLogger(__name__) log.debug(sys._getframe().f_code.co_name) async_client = tpm2.Client(tpm2.Client.FLAG_SPACE | tpm2.Client.FLAG_NONBLOCK) log.debug("Calling flush_context passing in an invalid handle ") handle = 0x80123456 rc = 0 try: async_client.flush_context(handle) except OSError as e: rc = e.errno self.assertEqual(rc, 22) async_client.close()
grace-kernel-grace-kernel-6.1.y
tools/testing/selftests/tpm2/tpm2_tests.py
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Test that truncation of bprm->buf doesn't cause unexpected exec paths, along
# with various other pathological cases.
import os, subprocess

# Relevant commits
#
# b5372fe5dc84 ("exec: load_script: Do not exec truncated interpreter path")
# 6eb3c3d0a52d ("exec: increase BINPRM_BUF_SIZE to 256")

# BINPRM_BUF_SIZE
SIZE=256

NAME_MAX=int(subprocess.check_output(["getconf", "NAME_MAX", "."]))

test_num=0

code='''#!/usr/bin/perl
print "Executed interpreter! Args:\n";
print "0 : '$0'\n";
$counter = 1;
foreach my $a (@ARGV) {
    print "$counter : '$a'\n";
    $counter++;
}
'''

##
# test - produce a binfmt_script hashbang line for testing
#
# @size:     bytes for bprm->buf line, including hashbang but not newline
# @good:     whether this script is expected to execute correctly
# @hashbang: the special 2 bytes for running binfmt_script
# @leading:  any leading whitespace before the executable path
# @root:     start of executable pathname
# @target:   end of executable pathname
# @arg:      bytes following the executable pathname
# @fill:     character to fill between @root and @target to reach @size bytes
# @newline:  character to use as newline, not counted towards @size
# ...
def test(name, size, good=True, leading="", root="./", target="/perl",
         fill="A", arg="", newline="\n", hashbang="#!"):
    global test_num, tests, NAME_MAX
    test_num += 1
    if test_num > tests:
        raise ValueError("more binfmt_script tests than expected! (want %d, expected %d)"
                         % (test_num, tests))

    middle = ""
    remaining = size - len(hashbang) - len(leading) - len(root) - len(target) - len(arg)
    # The middle of the pathname must not exceed NAME_MAX
    while remaining >= NAME_MAX:
        middle += fill * (NAME_MAX - 1)
        middle += '/'
        remaining -= NAME_MAX
    middle += fill * remaining

    dirpath = root + middle
    binary = dirpath + target
    if len(target):
        os.makedirs(dirpath, mode=0o755, exist_ok=True)
        open(binary, "w").write(code)
        os.chmod(binary, 0o755)

    buf=hashbang + leading + root + middle + target + arg + newline
    if len(newline) > 0:
        buf += 'echo this is not really perl\n'

    script = "binfmt_script-%s" % (name)
    open(script, "w").write(buf)
    os.chmod(script, 0o755)
    proc = subprocess.Popen(["./%s" % (script)], shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    stdout = proc.communicate()[0]

    if proc.returncode == 0 and b'Executed interpreter' in stdout:
        if good:
            print("ok %d - binfmt_script %s (successful good exec)"
                  % (test_num, name))
        else:
            print("not ok %d - binfmt_script %s succeeded when it should have failed"
                  % (test_num, name))
    else:
        if good:
            print("not ok %d - binfmt_script %s failed when it should have succeeded (rc:%d)"
                  % (test_num, name, proc.returncode))
        else:
            print("ok %d - binfmt_script %s (correctly failed bad exec)"
                  % (test_num, name))

    # Clean up crazy binaries
    os.unlink(script)
    if len(target):
        elements = binary.split('/')
        os.unlink(binary)
        elements.pop()
        while len(elements) > 1:
            os.rmdir("/".join(elements))
            elements.pop()

tests=27
print("TAP version 1.3")
print("1..%d" % (tests))

### FAIL (8 tests)

# Entire path is well past the BINPRM_BUF_SIZE.
test(name="too-big", size=SIZE+80, good=False)
# Path is right at max size, making it impossible to tell if it was truncated.
test(name="exact", size=SIZE, good=False)
# Same as above, but with leading whitespace.
test(name="exact-space", size=SIZE, good=False, leading=" ")
# Huge buffer of only whitespace.
test(name="whitespace-too-big", size=SIZE+71, good=False, root="",
     fill=" ", target="")
# A good path, but it gets truncated due to leading whitespace.
test(name="truncated", size=SIZE+17, good=False, leading=" " * 19)
# Entirely empty except for #!
test(name="empty", size=2, good=False, root="", fill="", target="",
     newline="")
# Within size, but entirely spaces
test(name="spaces", size=SIZE-1, good=False, root="", fill=" ", target="",
     newline="")
# Newline before binary.
test(name="newline-prefix", size=SIZE-1, good=False, leading="\n", root="",
     fill=" ", target="")

### ok (19 tests)
# The original test case that was broken by commit:
# 8099b047ecc4 ("exec: load_script: don't blindly truncate shebang string")
test(name="test.pl", size=439, leading=" ",
     root="./nix/store/bwav8kz8b3y471wjsybgzw84mrh4js9-perl-5.28.1/bin",
     arg=" -I/nix/store/x6yyav38jgr924nkna62q3pkp0dgmzlx-perl5.28.1-File-Slurp-9999.25/lib/perl5/site_perl -I/nix/store/ha8v67sl8dac92r9z07vzr4gv1y9nwqz-perl5.28.1-Net-DBus-1.1.0/lib/perl5/site_perl -I/nix/store/dcrkvnjmwh69ljsvpbdjjdnqgwx90a9d-perl5.28.1-XML-Parser-2.44/lib/perl5/site_perl -I/nix/store/rmji88k2zz7h4zg97385bygcydrf2q8h-perl5.28.1-XML-Twig-3.52/lib/perl5/site_perl")
# One byte under size, leaving newline visible.
test(name="one-under", size=SIZE-1)
# Two bytes under size, leaving newline visible.
test(name="two-under", size=SIZE-2)
# Exact size, but trailing whitespace visible instead of newline
test(name="exact-trunc-whitespace", size=SIZE, arg=" ")
# Exact size, but trailing space and first arg char visible instead of newline.
test(name="exact-trunc-arg", size=SIZE, arg=" f")
# One byte under, with confirmed non-truncated arg since newline now visible.
test(name="one-under-full-arg", size=SIZE-1, arg=" f")
# Short read buffer by one byte.
test(name="one-under-no-nl", size=SIZE-1, newline="")
# Short read buffer by half buffer size.
test(name="half-under-no-nl", size=int(SIZE/2), newline="")
# One byte under with whitespace arg, leaving newline visible.
test(name="one-under-trunc-arg", size=SIZE-1, arg=" ")
# One byte under with whitespace leading, leaving newline visible.
test(name="one-under-leading", size=SIZE-1, leading=" ")
# One byte under with whitespace leading and as arg, leaving newline visible.
test(name="one-under-leading-trunc-arg", size=SIZE-1, leading=" ", arg=" ")
# Same as above, but with 2 bytes under
test(name="two-under-no-nl", size=SIZE-2, newline="")
test(name="two-under-trunc-arg", size=SIZE-2, arg=" ")
test(name="two-under-leading", size=SIZE-2, leading=" ")
test(name="two-under-leading-trunc-arg", size=SIZE-2, leading=" ", arg=" ")
# Same as above, but with buffer half filled
test(name="two-under-no-nl", size=int(SIZE/2), newline="")
test(name="two-under-trunc-arg", size=int(SIZE/2), arg=" ")
test(name="two-under-leading", size=int(SIZE/2), leading=" ")
test(name="two-under-lead-trunc-arg", size=int(SIZE/2), leading=" ", arg=" ")

if test_num != tests:
    raise ValueError("fewer binfmt_script tests than expected! (ran %d, expected %d)"
                     % (test_num, tests))
grace-kernel-grace-kernel-6.1.y
tools/testing/selftests/exec/binfmt_script.py
#!/usr/bin/env python3 # Copyright (C) 2017 Netronome Systems, Inc. # Copyright (c) 2019 Mellanox Technologies. All rights reserved # # This software is licensed under the GNU General License Version 2, # June 1991 as shown in the file COPYING in the top-level directory of this # source tree. # # THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" # WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE # OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME # THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. from datetime import datetime import argparse import errno import json import os import pprint import random import re import stat import string import struct import subprocess import time import traceback logfile = None log_level = 1 skip_extack = False bpf_test_dir = os.path.dirname(os.path.realpath(__file__)) pp = pprint.PrettyPrinter() devs = [] # devices we created for clean up files = [] # files to be removed netns = [] # net namespaces to be removed def log_get_sec(level=0): return "*" * (log_level + level) def log_level_inc(add=1): global log_level log_level += add def log_level_dec(sub=1): global log_level log_level -= sub def log_level_set(level): global log_level log_level = level def log(header, data, level=None): """ Output to an optional log. """ if logfile is None: return if level is not None: log_level_set(level) if not isinstance(data, str): data = pp.pformat(data) if len(header): logfile.write("\n" + log_get_sec() + " ") logfile.write(header) if len(header) and len(data.strip()): logfile.write("\n") logfile.write(data) def skip(cond, msg): if not cond: return print("SKIP: " + msg) log("SKIP: " + msg, "", level=1) os.sys.exit(0) def fail(cond, msg): if not cond: return print("FAIL: " + msg) tb = "".join(traceback.extract_stack().format()) print(tb) log("FAIL: " + msg, tb, level=1) os.sys.exit(1) def start_test(msg): log(msg, "", level=1) log_level_inc() print(msg) def cmd(cmd, shell=True, include_stderr=False, background=False, fail=True): """ Run a command in subprocess and return tuple of (retval, stdout); optionally return stderr as well as third value. 
""" proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if background: msg = "%s START: %s" % (log_get_sec(1), datetime.now().strftime("%H:%M:%S.%f")) log("BKG " + proc.args, msg) return proc return cmd_result(proc, include_stderr=include_stderr, fail=fail) def cmd_result(proc, include_stderr=False, fail=False): stdout, stderr = proc.communicate() stdout = stdout.decode("utf-8") stderr = stderr.decode("utf-8") proc.stdout.close() proc.stderr.close() stderr = "\n" + stderr if stderr[-1] == "\n": stderr = stderr[:-1] sec = log_get_sec(1) log("CMD " + proc.args, "RETCODE: %d\n%s STDOUT:\n%s%s STDERR:%s\n%s END: %s" % (proc.returncode, sec, stdout, sec, stderr, sec, datetime.now().strftime("%H:%M:%S.%f"))) if proc.returncode != 0 and fail: if len(stderr) > 0 and stderr[-1] == "\n": stderr = stderr[:-1] raise Exception("Command failed: %s\n%s" % (proc.args, stderr)) if include_stderr: return proc.returncode, stdout, stderr else: return proc.returncode, stdout def rm(f): cmd("rm -f %s" % (f)) if f in files: files.remove(f) def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False): params = "" if JSON: params += "%s " % (flags["json"]) if ns != "": ns = "ip netns exec %s " % (ns) if include_stderr: ret, stdout, stderr = cmd(ns + name + " " + params + args, fail=fail, include_stderr=True) else: ret, stdout = cmd(ns + name + " " + params + args, fail=fail, include_stderr=False) if JSON and len(stdout.strip()) != 0: out = json.loads(stdout) else: out = stdout if include_stderr: return ret, out, stderr else: return ret, out def bpftool(args, JSON=True, ns="", fail=True, include_stderr=False): return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail, include_stderr=include_stderr) def bpftool_prog_list(expected=None, ns=""): _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True) # Remove the base progs for p in base_progs: if p in progs: progs.remove(p) if expected is not None: if len(progs) != expected: fail(True, "%d BPF programs loaded, expected %d" % (len(progs), expected)) return progs def bpftool_map_list(expected=None, ns=""): _, maps = bpftool("map show", JSON=True, ns=ns, fail=True) # Remove the base maps maps = [m for m in maps if m not in base_maps and m.get('name') and m.get('name') not in base_map_names] if expected is not None: if len(maps) != expected: fail(True, "%d BPF maps loaded, expected %d" % (len(maps), expected)) return maps def bpftool_prog_list_wait(expected=0, n_retry=20): for i in range(n_retry): nprogs = len(bpftool_prog_list()) if nprogs == expected: return time.sleep(0.05) raise Exception("Time out waiting for program counts to stabilize want %d, have %d" % (expected, nprogs)) def bpftool_map_list_wait(expected=0, n_retry=20): for i in range(n_retry): nmaps = len(bpftool_map_list()) if nmaps == expected: return time.sleep(0.05) raise Exception("Time out waiting for map counts to stabilize want %d, have %d" % (expected, nmaps)) def bpftool_prog_load(sample, file_name, maps=[], prog_type="xdp", dev=None, fail=True, include_stderr=False): args = "prog load %s %s" % (os.path.join(bpf_test_dir, sample), file_name) if prog_type is not None: args += " type " + prog_type if dev is not None: args += " dev " + dev if len(maps): args += " map " + " map ".join(maps) res = bpftool(args, fail=fail, include_stderr=include_stderr) if res[0] == 0: files.append(file_name) return res def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False): if force: args = "-force " + args return tool("ip", 
args, {"json":"-j"}, JSON=JSON, ns=ns, fail=fail, include_stderr=include_stderr) def tc(args, JSON=True, ns="", fail=True, include_stderr=False): return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail, include_stderr=include_stderr) def ethtool(dev, opt, args, fail=True): return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail) def bpf_obj(name, sec=".text", path=bpf_test_dir,): return "obj %s sec %s" % (os.path.join(path, name), sec) def bpf_pinned(name): return "pinned %s" % (name) def bpf_bytecode(bytecode): return "bytecode \"%s\"" % (bytecode) def mknetns(n_retry=10): for i in range(n_retry): name = ''.join([random.choice(string.ascii_letters) for i in range(8)]) ret, _ = ip("netns add %s" % (name), fail=False) if ret == 0: netns.append(name) return name return None def int2str(fmt, val): ret = [] for b in struct.pack(fmt, val): ret.append(int(b)) return " ".join(map(lambda x: str(x), ret)) def str2int(strtab): inttab = [] for i in strtab: inttab.append(int(i, 16)) ba = bytearray(inttab) if len(strtab) == 4: fmt = "I" elif len(strtab) == 8: fmt = "Q" else: raise Exception("String array of len %d can't be unpacked to an int" % (len(strtab))) return struct.unpack(fmt, ba)[0] class DebugfsDir: """ Class for accessing DebugFS directories as a dictionary. """ def __init__(self, path): self.path = path self._dict = self._debugfs_dir_read(path) def __len__(self): return len(self._dict.keys()) def __getitem__(self, key): if type(key) is int: key = list(self._dict.keys())[key] return self._dict[key] def __setitem__(self, key, value): log("DebugFS set %s = %s" % (key, value), "") log_level_inc() cmd("echo '%s' > %s/%s" % (value, self.path, key)) log_level_dec() _, out = cmd('cat %s/%s' % (self.path, key)) self._dict[key] = out.strip() def _debugfs_dir_read(self, path): dfs = {} log("DebugFS state for %s" % (path), "") log_level_inc(add=2) _, out = cmd('ls ' + path) for f in out.split(): if f == "ports": continue p = os.path.join(path, f) if not os.stat(p).st_mode & stat.S_IRUSR: continue if os.path.isfile(p): # We need to init trap_flow_action_cookie before read it if f == "trap_flow_action_cookie": cmd('echo deadbeef > %s/%s' % (path, f)) _, out = cmd('cat %s/%s' % (path, f)) dfs[f] = out.strip() elif os.path.isdir(p): dfs[f] = DebugfsDir(p) else: raise Exception("%s is neither file nor directory" % (p)) log_level_dec() log("DebugFS state", dfs) log_level_dec() return dfs class NetdevSimDev: """ Class for netdevsim bus device and its attributes. """ @staticmethod def ctrl_write(path, val): fullpath = os.path.join("/sys/bus/netdevsim/", path) try: with open(fullpath, "w") as f: f.write(val) except OSError as e: log("WRITE %s: %r" % (fullpath, val), -e.errno) raise e log("WRITE %s: %r" % (fullpath, val), 0) def __init__(self, port_count=1): addr = 0 while True: try: self.ctrl_write("new_device", "%u %u" % (addr, port_count)) except OSError as e: if e.errno == errno.ENOSPC: addr += 1 continue raise e break self.addr = addr # As probe of netdevsim device might happen from a workqueue, # so wait here until all netdevs appear. 
        self.wait_for_netdevs(port_count)

        ret, out = cmd("udevadm settle", fail=False)
        if ret:
            raise Exception("udevadm settle failed")
        ifnames = self.get_ifnames()

        devs.append(self)
        self.dfs_dir = "/sys/kernel/debug/netdevsim/netdevsim%u/" % addr

        self.nsims = []
        for port_index in range(port_count):
            self.nsims.append(NetdevSim(self, port_index,
                                        ifnames[port_index]))

    def get_ifnames(self):
        ifnames = []
        listdir = os.listdir("/sys/bus/netdevsim/devices/netdevsim%u/net/" %
                             self.addr)
        for ifname in listdir:
            ifnames.append(ifname)
        ifnames.sort()
        return ifnames

    def wait_for_netdevs(self, port_count):
        timeout = 5
        timeout_start = time.time()

        while True:
            try:
                ifnames = self.get_ifnames()
            except FileNotFoundError as e:
                ifnames = []
            if len(ifnames) == port_count:
                break
            if time.time() < timeout_start + timeout:
                continue
            raise Exception("netdevices did not appear within timeout")

    def dfs_num_bound_progs(self):
        path = os.path.join(self.dfs_dir, "bpf_bound_progs")
        _, progs = cmd('ls %s' % (path))
        return len(progs.split())

    def dfs_get_bound_progs(self, expected):
        progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
        if expected is not None:
            if len(progs) != expected:
                fail(True, "%d BPF programs bound, expected %d" %
                     (len(progs), expected))
        return progs

    def remove(self):
        self.ctrl_write("del_device", "%u" % (self.addr, ))
        devs.remove(self)

    def remove_nsim(self, nsim):
        self.nsims.remove(nsim)
        self.ctrl_write("devices/netdevsim%u/del_port" % (self.addr, ),
                        "%u" % (nsim.port_index, ))

class NetdevSim:
    """
    Class for netdevsim netdevice and its attributes.
    """

    def __init__(self, nsimdev, port_index, ifname):
        # In case udev renamed the netdev according to the new schema,
        # check if the name matches the port_index.
        nsimnamere = re.compile(r"eni\d+np(\d+)")
        match = nsimnamere.match(ifname)
        if match and int(match.groups()[0]) != port_index + 1:
            raise Exception("netdevice name does not match the expected one")

        self.nsimdev = nsimdev
        self.port_index = port_index
        self.ns = ""
        self.dfs_dir = "%s/ports/%u/" % (nsimdev.dfs_dir, port_index)
        self.dfs_refresh()
        _, [self.dev] = ip("link show dev %s" % ifname)

    def __getitem__(self, key):
        return self.dev[key]

    def remove(self):
        self.nsimdev.remove_nsim(self)

    def dfs_refresh(self):
        self.dfs = DebugfsDir(self.dfs_dir)
        return self.dfs

    def dfs_read(self, f):
        path = os.path.join(self.dfs_dir, f)
        _, data = cmd('cat %s' % (path))
        return data.strip()

    def wait_for_flush(self, bound=0, total=0, n_retry=20):
        for i in range(n_retry):
            nbound = self.nsimdev.dfs_num_bound_progs()
            nprogs = len(bpftool_prog_list())
            if nbound == bound and nprogs == total:
                return
            time.sleep(0.05)
        raise Exception("Time out waiting for program counts to stabilize want %d/%d, have %d bound, %d loaded" %
                        (bound, total, nbound, nprogs))

    def set_ns(self, ns):
        name = "1" if ns == "" else ns
        ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns)
        self.ns = ns

    def set_mtu(self, mtu, fail=True):
        return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu),
                  fail=fail)

    def set_xdp(self, bpf, mode, force=False, JSON=True, verbose=False,
                fail=True, include_stderr=False):
        if verbose:
            bpf += " verbose"
        return ip("link set dev %s xdp%s %s" % (self.dev["ifname"], mode, bpf),
                  force=force, JSON=JSON, fail=fail,
                  include_stderr=include_stderr)

    def unset_xdp(self, mode, force=False, JSON=True,
                  fail=True, include_stderr=False):
        return ip("link set dev %s xdp%s off" % (self.dev["ifname"], mode),
                  force=force, JSON=JSON, fail=fail,
                  include_stderr=include_stderr)

    def ip_link_show(self, xdp):
        _, link = ip("link show dev %s" %
(self['ifname'])) if len(link) > 1: raise Exception("Multiple objects on ip link show") if len(link) < 1: return {} fail(xdp != "xdp" in link, "XDP program not reporting in iplink (reported %s, expected %s)" % ("xdp" in link, xdp)) return link[0] def tc_add_ingress(self): tc("qdisc add dev %s ingress" % (self['ifname'])) def tc_del_ingress(self): tc("qdisc del dev %s ingress" % (self['ifname'])) def tc_flush_filters(self, bound=0, total=0): self.tc_del_ingress() self.tc_add_ingress() self.wait_for_flush(bound=bound, total=total) def tc_show_ingress(self, expected=None): # No JSON support, oh well... flags = ["skip_sw", "skip_hw", "in_hw"] named = ["protocol", "pref", "chain", "handle", "id", "tag"] args = "-s filter show dev %s ingress" % (self['ifname']) _, out = tc(args, JSON=False) filters = [] lines = out.split('\n') for line in lines: words = line.split() if "handle" not in words: continue fltr = {} for flag in flags: fltr[flag] = flag in words for name in named: try: idx = words.index(name) fltr[name] = words[idx + 1] except ValueError: pass filters.append(fltr) if expected is not None: fail(len(filters) != expected, "%d ingress filters loaded, expected %d" % (len(filters), expected)) return filters def cls_filter_op(self, op, qdisc="ingress", prio=None, handle=None, chain=None, cls="", params="", fail=True, include_stderr=False): spec = "" if prio is not None: spec += " prio %d" % (prio) if handle: spec += " handle %s" % (handle) if chain is not None: spec += " chain %d" % (chain) return tc("filter {op} dev {dev} {qdisc} {spec} {cls} {params}"\ .format(op=op, dev=self['ifname'], qdisc=qdisc, spec=spec, cls=cls, params=params), fail=fail, include_stderr=include_stderr) def cls_bpf_add_filter(self, bpf, op="add", prio=None, handle=None, chain=None, da=False, verbose=False, skip_sw=False, skip_hw=False, fail=True, include_stderr=False): cls = "bpf " + bpf params = "" if da: params += " da" if verbose: params += " verbose" if skip_sw: params += " skip_sw" if skip_hw: params += " skip_hw" return self.cls_filter_op(op=op, prio=prio, handle=handle, cls=cls, chain=chain, params=params, fail=fail, include_stderr=include_stderr) def set_ethtool_tc_offloads(self, enable, fail=True): args = "hw-tc-offload %s" % ("on" if enable else "off") return ethtool(self, "-K", args, fail=fail) ################################################################################ def clean_up(): global files, netns, devs for dev in devs: dev.remove() for f in files: cmd("rm -f %s" % (f)) for ns in netns: cmd("ip netns delete %s" % (ns)) files = [] netns = [] def pin_prog(file_name, idx=0): progs = bpftool_prog_list(expected=(idx + 1)) prog = progs[idx] bpftool("prog pin id %d %s" % (prog["id"], file_name)) files.append(file_name) return file_name, bpf_pinned(file_name) def pin_map(file_name, idx=0, expected=1): maps = bpftool_map_list(expected=expected) m = maps[idx] bpftool("map pin id %d %s" % (m["id"], file_name)) files.append(file_name) return file_name, bpf_pinned(file_name) def check_dev_info_removed(prog_file=None, map_file=None): bpftool_prog_list(expected=0) ret, err = bpftool("prog show pin %s" % (prog_file), fail=False) fail(ret == 0, "Showing prog with removed device did not fail") fail(err["error"].find("No such device") == -1, "Showing prog with removed device expected ENODEV, error is %s" % (err["error"])) bpftool_map_list(expected=0) ret, err = bpftool("map show pin %s" % (map_file), fail=False) fail(ret == 0, "Showing map with removed device did not fail") fail(err["error"].find("No such device") 
== -1, "Showing map with removed device expected ENODEV, error is %s" % (err["error"])) def check_dev_info(other_ns, ns, prog_file=None, map_file=None, removed=False): progs = bpftool_prog_list(expected=1, ns=ns) prog = progs[0] fail("dev" not in prog.keys(), "Device parameters not reported") dev = prog["dev"] fail("ifindex" not in dev.keys(), "Device parameters not reported") fail("ns_dev" not in dev.keys(), "Device parameters not reported") fail("ns_inode" not in dev.keys(), "Device parameters not reported") if not other_ns: fail("ifname" not in dev.keys(), "Ifname not reported") fail(dev["ifname"] != sim["ifname"], "Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"])) else: fail("ifname" in dev.keys(), "Ifname is reported for other ns") maps = bpftool_map_list(expected=2, ns=ns) for m in maps: fail("dev" not in m.keys(), "Device parameters not reported") fail(dev != m["dev"], "Map's device different than program's") def check_extack(output, reference, args): if skip_extack: return lines = output.split("\n") comp = len(lines) >= 2 and lines[1] == 'Error: ' + reference fail(not comp, "Missing or incorrect netlink extack message") def check_extack_nsim(output, reference, args): check_extack(output, "netdevsim: " + reference, args) def check_no_extack(res, needle): fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"), "Found '%s' in command output, leaky extack?" % (needle)) def check_verifier_log(output, reference): lines = output.split("\n") for l in reversed(lines): if l == reference: return fail(True, "Missing or incorrect message from netdevsim in verifier log") def check_multi_basic(two_xdps): fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs") fail("prog" in two_xdps, "Base program reported in multi program mode") fail(len(two_xdps["attached"]) != 2, "Wrong attached program count with two programs") fail(two_xdps["attached"][0]["prog"]["id"] == two_xdps["attached"][1]["prog"]["id"], "Offloaded and other programs have the same id") def test_spurios_extack(sim, obj, skip_hw, needle): res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw, include_stderr=True) check_no_extack(res, needle) res = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_hw=skip_hw, include_stderr=True) check_no_extack(res, needle) res = sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf", include_stderr=True) check_no_extack(res, needle) def test_multi_prog(simdev, sim, obj, modename, modeid): start_test("Test multi-attachment XDP - %s + offload..." 
% (modename or "default", )) sim.set_xdp(obj, "offload") xdp = sim.ip_link_show(xdp=True)["xdp"] offloaded = sim.dfs_read("bpf_offloaded_id") fail("prog" not in xdp, "Base program not reported in single program mode") fail(len(xdp["attached"]) != 1, "Wrong attached program count with one program") sim.set_xdp(obj, modename) two_xdps = sim.ip_link_show(xdp=True)["xdp"] fail(xdp["attached"][0] not in two_xdps["attached"], "Offload program not reported after other activated") check_multi_basic(two_xdps) offloaded2 = sim.dfs_read("bpf_offloaded_id") fail(offloaded != offloaded2, "Offload ID changed after loading other program") start_test("Test multi-attachment XDP - replace...") ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True) fail(ret == 0, "Replaced one of programs without -force") check_extack(err, "XDP program already attached.", args) start_test("Test multi-attachment XDP - remove without mode...") ret, _, err = sim.unset_xdp("", force=True, fail=False, include_stderr=True) fail(ret == 0, "Removed program without a mode flag") check_extack(err, "More than one program loaded, unset mode is ambiguous.", args) sim.unset_xdp("offload") xdp = sim.ip_link_show(xdp=True)["xdp"] offloaded = sim.dfs_read("bpf_offloaded_id") fail(xdp["mode"] != modeid, "Bad mode reported after multiple programs") fail("prog" not in xdp, "Base program not reported after multi program mode") fail(xdp["attached"][0] not in two_xdps["attached"], "Offload program not reported after other activated") fail(len(xdp["attached"]) != 1, "Wrong attached program count with remaining programs") fail(offloaded != "0", "Offload ID reported with only other program left") start_test("Test multi-attachment XDP - reattach...") sim.set_xdp(obj, "offload") two_xdps = sim.ip_link_show(xdp=True)["xdp"] fail(xdp["attached"][0] not in two_xdps["attached"], "Other program not reported after offload activated") check_multi_basic(two_xdps) start_test("Test multi-attachment XDP - device remove...") simdev.remove() simdev = NetdevSimDev() sim, = simdev.nsims sim.set_ethtool_tc_offloads(True) return [simdev, sim] # Parse command line parser = argparse.ArgumentParser() parser.add_argument("--log", help="output verbose log to given file") args = parser.parse_args() if args.log: logfile = open(args.log, 'w+') logfile.write("# -*-Org-*-") log("Prepare...", "", level=1) log_level_inc() # Check permissions skip(os.getuid() != 0, "test must be run as root") # Check tools ret, progs = bpftool("prog", fail=False) skip(ret != 0, "bpftool not installed") base_progs = progs _, base_maps = bpftool("map") base_map_names = [ 'pid_iter.rodata' # created on each bpftool invocation ] # Check netdevsim ret, out = cmd("modprobe netdevsim", fail=False) skip(ret != 0, "netdevsim module could not be loaded") # Check debugfs _, out = cmd("mount") if out.find("/sys/kernel/debug type debugfs") == -1: cmd("mount -t debugfs none /sys/kernel/debug") # Check samples are compiled samples = ["sample_ret0.bpf.o", "sample_map_ret0.bpf.o"] for s in samples: ret, out = cmd("ls %s/%s" % (bpf_test_dir, s), fail=False) skip(ret != 0, "sample %s/%s not found, please compile it" % (bpf_test_dir, s)) # Check if iproute2 is built with libmnl (needed by extack support) _, _, err = cmd("tc qdisc delete dev lo handle 0", fail=False, include_stderr=True) if err.find("Error: Failed to find qdisc with specified handle.") == -1: print("Warning: no extack message in iproute2 output, libmnl missing?") log("Warning: no extack message in iproute2 output, libmnl missing?", 
"") skip_extack = True # Check if net namespaces seem to work ns = mknetns() skip(ns is None, "Could not create a net namespace") cmd("ip netns delete %s" % (ns)) netns = [] try: obj = bpf_obj("sample_ret0.bpf.o") bytecode = bpf_bytecode("1,6 0 0 4294967295,") start_test("Test destruction of generic XDP...") simdev = NetdevSimDev() sim, = simdev.nsims sim.set_xdp(obj, "generic") simdev.remove() bpftool_prog_list_wait(expected=0) simdev = NetdevSimDev() sim, = simdev.nsims sim.tc_add_ingress() start_test("Test TC non-offloaded...") ret, _ = sim.cls_bpf_add_filter(obj, skip_hw=True, fail=False) fail(ret != 0, "Software TC filter did not load") start_test("Test TC non-offloaded isn't getting bound...") ret, _ = sim.cls_bpf_add_filter(obj, fail=False) fail(ret != 0, "Software TC filter did not load") simdev.dfs_get_bound_progs(expected=0) sim.tc_flush_filters() start_test("Test TC offloads are off by default...") ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "TC filter loaded without enabling TC offloads") check_extack(err, "TC offload is disabled on net device.", args) sim.wait_for_flush() sim.set_ethtool_tc_offloads(True) sim.dfs["bpf_tc_non_bound_accept"] = "Y" start_test("Test TC offload by default...") ret, _ = sim.cls_bpf_add_filter(obj, fail=False) fail(ret != 0, "Software TC filter did not load") simdev.dfs_get_bound_progs(expected=0) ingress = sim.tc_show_ingress(expected=1) fltr = ingress[0] fail(not fltr["in_hw"], "Filter not offloaded by default") sim.tc_flush_filters() start_test("Test TC cBPF bytcode tries offload by default...") ret, _ = sim.cls_bpf_add_filter(bytecode, fail=False) fail(ret != 0, "Software TC filter did not load") simdev.dfs_get_bound_progs(expected=0) ingress = sim.tc_show_ingress(expected=1) fltr = ingress[0] fail(not fltr["in_hw"], "Bytecode not offloaded by default") sim.tc_flush_filters() sim.dfs["bpf_tc_non_bound_accept"] = "N" start_test("Test TC cBPF unbound bytecode doesn't offload...") ret, _, err = sim.cls_bpf_add_filter(bytecode, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "TC bytecode loaded for offload") check_extack_nsim(err, "netdevsim configured to reject unbound programs.", args) sim.wait_for_flush() start_test("Test non-0 chain offload...") ret, _, err = sim.cls_bpf_add_filter(obj, chain=1, prio=1, handle=1, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "Offloaded a filter to chain other than 0") check_extack(err, "Driver supports only offload of chain 0.", args) sim.tc_flush_filters() start_test("Test TC replace...") sim.cls_bpf_add_filter(obj, prio=1, handle=1) sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1) sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf") sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_sw=True) sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_sw=True) sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf") sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=True) sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_hw=True) sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf") start_test("Test TC replace bad flags...") for i in range(3): for j in range(3): ret, _ = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_sw=(j == 1), skip_hw=(j == 2), fail=False) fail(bool(ret) != bool(j), "Software TC incorrect load in replace test, iteration %d" % (j)) sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf") start_test("Test spurious extack from the 
driver...") test_spurios_extack(sim, obj, False, "netdevsim") test_spurios_extack(sim, obj, True, "netdevsim") sim.set_ethtool_tc_offloads(False) test_spurios_extack(sim, obj, False, "TC offload is disabled") test_spurios_extack(sim, obj, True, "TC offload is disabled") sim.set_ethtool_tc_offloads(True) sim.tc_flush_filters() start_test("Test TC offloads failure...") sim.dfs["dev/bpf_bind_verifier_accept"] = 0 ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "TC filter did not reject with TC offloads enabled") check_verifier_log(err, "[netdevsim] Hello from netdevsim!") sim.dfs["dev/bpf_bind_verifier_accept"] = 1 start_test("Test TC offloads work...") ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True, fail=False, include_stderr=True) fail(ret != 0, "TC filter did not load with TC offloads enabled") start_test("Test TC offload basics...") dfs = simdev.dfs_get_bound_progs(expected=1) progs = bpftool_prog_list(expected=1) ingress = sim.tc_show_ingress(expected=1) dprog = dfs[0] prog = progs[0] fltr = ingress[0] fail(fltr["skip_hw"], "TC does reports 'skip_hw' on offloaded filter") fail(not fltr["in_hw"], "TC does not report 'in_hw' for offloaded filter") fail(not fltr["skip_sw"], "TC does not report 'skip_sw' back") start_test("Test TC offload is device-bound...") fail(str(prog["id"]) != fltr["id"], "Program IDs don't match") fail(prog["tag"] != fltr["tag"], "Program tags don't match") fail(fltr["id"] != dprog["id"], "Program IDs don't match") fail(dprog["state"] != "xlated", "Offloaded program state not translated") fail(dprog["loaded"] != "Y", "Offloaded program is not loaded") start_test("Test disabling TC offloads is rejected while filters installed...") ret, _ = sim.set_ethtool_tc_offloads(False, fail=False) fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...") sim.set_ethtool_tc_offloads(True) start_test("Test qdisc removal frees things...") sim.tc_flush_filters() sim.tc_show_ingress(expected=0) start_test("Test disabling TC offloads is OK without filters...") ret, _ = sim.set_ethtool_tc_offloads(False, fail=False) fail(ret != 0, "Driver refused to disable TC offloads without filters installed...") sim.set_ethtool_tc_offloads(True) start_test("Test destroying device gets rid of TC filters...") sim.cls_bpf_add_filter(obj, skip_sw=True) simdev.remove() bpftool_prog_list_wait(expected=0) simdev = NetdevSimDev() sim, = simdev.nsims sim.set_ethtool_tc_offloads(True) start_test("Test destroying device gets rid of XDP...") sim.set_xdp(obj, "offload") simdev.remove() bpftool_prog_list_wait(expected=0) simdev = NetdevSimDev() sim, = simdev.nsims sim.set_ethtool_tc_offloads(True) start_test("Test XDP prog reporting...") sim.set_xdp(obj, "drv") ipl = sim.ip_link_show(xdp=True) progs = bpftool_prog_list(expected=1) fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"], "Loaded program has wrong ID") start_test("Test XDP prog replace without force...") ret, _ = sim.set_xdp(obj, "drv", fail=False) fail(ret == 0, "Replaced XDP program without -force") sim.wait_for_flush(total=1) start_test("Test XDP prog replace with force...") ret, _ = sim.set_xdp(obj, "drv", force=True, fail=False) fail(ret != 0, "Could not replace XDP program with -force") bpftool_prog_list_wait(expected=1) ipl = sim.ip_link_show(xdp=True) progs = bpftool_prog_list(expected=1) fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"], "Loaded program has wrong ID") fail("dev" in progs[0].keys(), "Device parameters reported for 
non-offloaded program") start_test("Test XDP prog replace with bad flags...") ret, _, err = sim.set_xdp(obj, "generic", force=True, fail=False, include_stderr=True) fail(ret == 0, "Replaced XDP program with a program in different mode") check_extack(err, "Native and generic XDP can't be active at the same time.", args) start_test("Test MTU restrictions...") ret, _ = sim.set_mtu(9000, fail=False) fail(ret == 0, "Driver should refuse to increase MTU to 9000 with XDP loaded...") sim.unset_xdp("drv") bpftool_prog_list_wait(expected=0) sim.set_mtu(9000) ret, _, err = sim.set_xdp(obj, "drv", fail=False, include_stderr=True) fail(ret == 0, "Driver should refuse to load program with MTU of 9000...") check_extack_nsim(err, "MTU too large w/ XDP enabled.", args) sim.set_mtu(1500) sim.wait_for_flush() start_test("Test non-offload XDP attaching to HW...") bpftool_prog_load("sample_ret0.bpf.o", "/sys/fs/bpf/nooffload") nooffload = bpf_pinned("/sys/fs/bpf/nooffload") ret, _, err = sim.set_xdp(nooffload, "offload", fail=False, include_stderr=True) fail(ret == 0, "attached non-offloaded XDP program to HW") check_extack_nsim(err, "xdpoffload of non-bound program.", args) rm("/sys/fs/bpf/nooffload") start_test("Test offload XDP attaching to drv...") bpftool_prog_load("sample_ret0.bpf.o", "/sys/fs/bpf/offload", dev=sim['ifname']) offload = bpf_pinned("/sys/fs/bpf/offload") ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True) fail(ret == 0, "attached offloaded XDP program to drv") check_extack(err, "Using device-bound program without HW_MODE flag is not supported.", args) rm("/sys/fs/bpf/offload") sim.wait_for_flush() start_test("Test XDP load failure...") sim.dfs["dev/bpf_bind_verifier_accept"] = 0 ret, _, err = bpftool_prog_load("sample_ret0.bpf.o", "/sys/fs/bpf/offload", dev=sim['ifname'], fail=False, include_stderr=True) fail(ret == 0, "verifier should fail on load") check_verifier_log(err, "[netdevsim] Hello from netdevsim!") sim.dfs["dev/bpf_bind_verifier_accept"] = 1 sim.wait_for_flush() start_test("Test XDP offload...") _, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True) ipl = sim.ip_link_show(xdp=True) link_xdp = ipl["xdp"]["prog"] progs = bpftool_prog_list(expected=1) prog = progs[0] fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID") start_test("Test XDP offload is device bound...") dfs = simdev.dfs_get_bound_progs(expected=1) dprog = dfs[0] fail(prog["id"] != link_xdp["id"], "Program IDs don't match") fail(prog["tag"] != link_xdp["tag"], "Program tags don't match") fail(str(link_xdp["id"]) != dprog["id"], "Program IDs don't match") fail(dprog["state"] != "xlated", "Offloaded program state not translated") fail(dprog["loaded"] != "Y", "Offloaded program is not loaded") start_test("Test removing XDP program many times...") sim.unset_xdp("offload") sim.unset_xdp("offload") sim.unset_xdp("drv") sim.unset_xdp("drv") sim.unset_xdp("") sim.unset_xdp("") bpftool_prog_list_wait(expected=0) start_test("Test attempt to use a program for a wrong device...") simdev2 = NetdevSimDev() sim2, = simdev2.nsims sim2.set_xdp(obj, "offload") pin_file, pinned = pin_prog("/sys/fs/bpf/tmp") ret, _, err = sim.set_xdp(pinned, "offload", fail=False, include_stderr=True) fail(ret == 0, "Pinned program loaded for a different device accepted") check_extack_nsim(err, "program bound to different dev.", args) simdev2.remove() ret, _, err = sim.set_xdp(pinned, "offload", fail=False, include_stderr=True) fail(ret == 0, "Pinned program loaded for a removed device accepted") 
check_extack_nsim(err, "xdpoffload of non-bound program.", args) rm(pin_file) bpftool_prog_list_wait(expected=0) simdev, sim = test_multi_prog(simdev, sim, obj, "", 1) simdev, sim = test_multi_prog(simdev, sim, obj, "drv", 1) simdev, sim = test_multi_prog(simdev, sim, obj, "generic", 2) start_test("Test mixing of TC and XDP...") sim.tc_add_ingress() sim.set_xdp(obj, "offload") ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "Loading TC when XDP active should fail") check_extack_nsim(err, "driver and netdev offload states mismatch.", args) sim.unset_xdp("offload") sim.wait_for_flush() sim.cls_bpf_add_filter(obj, skip_sw=True) ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True) fail(ret == 0, "Loading XDP when TC active should fail") check_extack_nsim(err, "TC program is already loaded.", args) start_test("Test binding TC from pinned...") pin_file, pinned = pin_prog("/sys/fs/bpf/tmp") sim.tc_flush_filters(bound=1, total=1) sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True) sim.tc_flush_filters(bound=1, total=1) start_test("Test binding XDP from pinned...") sim.set_xdp(obj, "offload") pin_file, pinned = pin_prog("/sys/fs/bpf/tmp2", idx=1) sim.set_xdp(pinned, "offload", force=True) sim.unset_xdp("offload") sim.set_xdp(pinned, "offload", force=True) sim.unset_xdp("offload") start_test("Test offload of wrong type fails...") ret, _ = sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True, fail=False) fail(ret == 0, "Managed to attach XDP program to TC") start_test("Test asking for TC offload of two filters...") sim.cls_bpf_add_filter(obj, da=True, skip_sw=True) ret, _, err = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "Managed to offload two TC filters at the same time") check_extack_nsim(err, "driver and netdev offload states mismatch.", args) sim.tc_flush_filters(bound=2, total=2) start_test("Test if netdev removal waits for translation...") delay_msec = 500 sim.dfs["dev/bpf_bind_verifier_delay"] = delay_msec start = time.time() cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \ (sim['ifname'], obj) tc_proc = cmd(cmd_line, background=True, fail=False) # Wait for the verifier to start while simdev.dfs_num_bound_progs() <= 2: pass simdev.remove() end = time.time() ret, _ = cmd_result(tc_proc, fail=False) time_diff = end - start log("Time", "start:\t%s\nend:\t%s\ndiff:\t%s" % (start, end, time_diff)) fail(ret == 0, "Managed to load TC filter on a unregistering device") delay_sec = delay_msec * 0.001 fail(time_diff < delay_sec, "Removal process took %s, expected %s" % (time_diff, delay_sec)) # Remove all pinned files and reinstantiate the netdev clean_up() bpftool_prog_list_wait(expected=0) simdev = NetdevSimDev() sim, = simdev.nsims map_obj = bpf_obj("sample_map_ret0.bpf.o") start_test("Test loading program with maps...") sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON start_test("Test bpftool bound info reporting (own ns)...") check_dev_info(False, "") start_test("Test bpftool bound info reporting (other ns)...") ns = mknetns() sim.set_ns(ns) check_dev_info(True, "") start_test("Test bpftool bound info reporting (remote ns)...") check_dev_info(False, ns) start_test("Test bpftool bound info reporting (back to own ns)...") sim.set_ns("") check_dev_info(False, "") prog_file, _ = pin_prog("/sys/fs/bpf/tmp_prog") map_file, _ = pin_map("/sys/fs/bpf/tmp_map", idx=1, expected=2) simdev.remove() start_test("Test bpftool bound info 
reporting (removed dev)...") check_dev_info_removed(prog_file=prog_file, map_file=map_file) # Remove all pinned files and reinstantiate the netdev clean_up() bpftool_prog_list_wait(expected=0) simdev = NetdevSimDev() sim, = simdev.nsims start_test("Test map update (no flags)...") sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON maps = bpftool_map_list(expected=2) array = maps[0] if maps[0]["type"] == "array" else maps[1] htab = maps[0] if maps[0]["type"] == "hash" else maps[1] for m in maps: for i in range(2): bpftool("map update id %d key %s value %s" % (m["id"], int2str("I", i), int2str("Q", i * 3))) for m in maps: ret, _ = bpftool("map update id %d key %s value %s" % (m["id"], int2str("I", 3), int2str("Q", 3 * 3)), fail=False) fail(ret == 0, "added too many entries") start_test("Test map update (exists)...") for m in maps: for i in range(2): bpftool("map update id %d key %s value %s exist" % (m["id"], int2str("I", i), int2str("Q", i * 3))) for m in maps: ret, err = bpftool("map update id %d key %s value %s exist" % (m["id"], int2str("I", 3), int2str("Q", 3 * 3)), fail=False) fail(ret == 0, "updated non-existing key") fail(err["error"].find("No such file or directory") == -1, "expected ENOENT, error is '%s'" % (err["error"])) start_test("Test map update (noexist)...") for m in maps: for i in range(2): ret, err = bpftool("map update id %d key %s value %s noexist" % (m["id"], int2str("I", i), int2str("Q", i * 3)), fail=False) fail(ret == 0, "updated existing key") fail(err["error"].find("File exists") == -1, "expected EEXIST, error is '%s'" % (err["error"])) start_test("Test map dump...") for m in maps: _, entries = bpftool("map dump id %d" % (m["id"])) for i in range(2): key = str2int(entries[i]["key"]) fail(key != i, "expected key %d, got %d" % (key, i)) val = str2int(entries[i]["value"]) fail(val != i * 3, "expected value %d, got %d" % (val, i * 3)) start_test("Test map getnext...") for m in maps: _, entry = bpftool("map getnext id %d" % (m["id"])) key = str2int(entry["next_key"]) fail(key != 0, "next key %d, expected %d" % (key, 0)) _, entry = bpftool("map getnext id %d key %s" % (m["id"], int2str("I", 0))) key = str2int(entry["next_key"]) fail(key != 1, "next key %d, expected %d" % (key, 1)) ret, err = bpftool("map getnext id %d key %s" % (m["id"], int2str("I", 1)), fail=False) fail(ret == 0, "got next key past the end of map") fail(err["error"].find("No such file or directory") == -1, "expected ENOENT, error is '%s'" % (err["error"])) start_test("Test map delete (htab)...") for i in range(2): bpftool("map delete id %d key %s" % (htab["id"], int2str("I", i))) start_test("Test map delete (array)...") for i in range(2): ret, err = bpftool("map delete id %d key %s" % (htab["id"], int2str("I", i)), fail=False) fail(ret == 0, "removed entry from an array") fail(err["error"].find("No such file or directory") == -1, "expected ENOENT, error is '%s'" % (err["error"])) start_test("Test map remove...") sim.unset_xdp("offload") bpftool_map_list_wait(expected=0) simdev.remove() simdev = NetdevSimDev() sim, = simdev.nsims sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON simdev.remove() bpftool_map_list_wait(expected=0) start_test("Test map creation fail path...") simdev = NetdevSimDev() sim, = simdev.nsims sim.dfs["bpf_map_accept"] = "N" ret, _ = sim.set_xdp(map_obj, "offload", JSON=False, fail=False) fail(ret == 0, "netdevsim didn't refuse to create a map with offload disabled") simdev.remove() start_test("Test multi-dev ASIC program reuse...") simdevA 
= NetdevSimDev() simA, = simdevA.nsims simdevB = NetdevSimDev(3) simB1, simB2, simB3 = simdevB.nsims sims = (simA, simB1, simB2, simB3) simB = (simB1, simB2, simB3) bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimA", dev=simA['ifname']) progA = bpf_pinned("/sys/fs/bpf/nsimA") bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimB", dev=simB1['ifname']) progB = bpf_pinned("/sys/fs/bpf/nsimB") simA.set_xdp(progA, "offload", JSON=False) for d in simdevB.nsims: d.set_xdp(progB, "offload", JSON=False) start_test("Test multi-dev ASIC cross-dev replace...") ret, _ = simA.set_xdp(progB, "offload", force=True, JSON=False, fail=False) fail(ret == 0, "cross-ASIC program allowed") for d in simdevB.nsims: ret, _ = d.set_xdp(progA, "offload", force=True, JSON=False, fail=False) fail(ret == 0, "cross-ASIC program allowed") start_test("Test multi-dev ASIC cross-dev install...") for d in sims: d.unset_xdp("offload") ret, _, err = simA.set_xdp(progB, "offload", force=True, JSON=False, fail=False, include_stderr=True) fail(ret == 0, "cross-ASIC program allowed") check_extack_nsim(err, "program bound to different dev.", args) for d in simdevB.nsims: ret, _, err = d.set_xdp(progA, "offload", force=True, JSON=False, fail=False, include_stderr=True) fail(ret == 0, "cross-ASIC program allowed") check_extack_nsim(err, "program bound to different dev.", args) start_test("Test multi-dev ASIC cross-dev map reuse...") mapA = bpftool("prog show %s" % (progA))[1]["map_ids"][0] mapB = bpftool("prog show %s" % (progB))[1]["map_ids"][0] ret, _ = bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimB_", dev=simB3['ifname'], maps=["idx 0 id %d" % (mapB)], fail=False) fail(ret != 0, "couldn't reuse a map on the same ASIC") rm("/sys/fs/bpf/nsimB_") ret, _, err = bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimA_", dev=simA['ifname'], maps=["idx 0 id %d" % (mapB)], fail=False, include_stderr=True) fail(ret == 0, "could reuse a map on a different ASIC") fail(err.count("offload device mismatch between prog and map") == 0, "error message missing for cross-ASIC map") ret, _, err = bpftool_prog_load("sample_map_ret0.bpf.o", "/sys/fs/bpf/nsimB_", dev=simB1['ifname'], maps=["idx 0 id %d" % (mapA)], fail=False, include_stderr=True) fail(ret == 0, "could reuse a map on a different ASIC") fail(err.count("offload device mismatch between prog and map") == 0, "error message missing for cross-ASIC map") start_test("Test multi-dev ASIC cross-dev destruction...") bpftool_prog_list_wait(expected=2) simdevA.remove() bpftool_prog_list_wait(expected=1) ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"] fail(ifnameB != simB1['ifname'], "program not bound to original device") simB1.remove() bpftool_prog_list_wait(expected=1) start_test("Test multi-dev ASIC cross-dev destruction - move...") ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"] fail(ifnameB not in (simB2['ifname'], simB3['ifname']), "program not bound to remaining devices") simB2.remove() ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"] fail(ifnameB != simB3['ifname'], "program not bound to remaining device") simB3.remove() simdevB.remove() bpftool_prog_list_wait(expected=0) start_test("Test multi-dev ASIC cross-dev destruction - orphaned...") ret, out = bpftool("prog show %s" % (progB), fail=False) fail(ret == 0, "got information about orphaned program") fail("error" not in out, "no error reported for get info on orphaned") fail(out["error"] != "can't get prog info: No such device", "wrong error for get info 
on orphaned") print("%s: OK" % (os.path.basename(__file__))) finally: log("Clean up...", "", level=1) log_level_inc() clean_up()
grace-kernel-grace-kernel-6.1.y
tools/testing/selftests/bpf/test_offload.py
# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2020 SUSE LLC. import collections import functools import json import os import socket import subprocess import unittest # Add the source tree of bpftool and /usr/local/sbin to PATH cur_dir = os.path.dirname(os.path.realpath(__file__)) bpftool_dir = os.path.abspath(os.path.join(cur_dir, "..", "..", "..", "..", "tools", "bpf", "bpftool")) os.environ["PATH"] = bpftool_dir + ":/usr/local/sbin:" + os.environ["PATH"] class IfaceNotFoundError(Exception): pass class UnprivilegedUserError(Exception): pass def _bpftool(args, json=True): _args = ["bpftool"] if json: _args.append("-j") _args.extend(args) return subprocess.check_output(_args) def bpftool(args): return _bpftool(args, json=False).decode("utf-8") def bpftool_json(args): res = _bpftool(args) return json.loads(res) def get_default_iface(): for iface in socket.if_nameindex(): if iface[1] != "lo": return iface[1] raise IfaceNotFoundError("Could not find any network interface to probe") def default_iface(f): @functools.wraps(f) def wrapper(*args, **kwargs): iface = get_default_iface() return f(*args, iface, **kwargs) return wrapper DMESG_EMITTING_HELPERS = [ "bpf_probe_write_user", "bpf_trace_printk", "bpf_trace_vprintk", ] class TestBpftool(unittest.TestCase): @classmethod def setUpClass(cls): if os.getuid() != 0: raise UnprivilegedUserError( "This test suite needs root privileges") @default_iface def test_feature_dev_json(self, iface): unexpected_helpers = DMESG_EMITTING_HELPERS expected_keys = [ "syscall_config", "program_types", "map_types", "helpers", "misc", ] res = bpftool_json(["feature", "probe", "dev", iface]) # Check if the result has all expected keys. self.assertCountEqual(res.keys(), expected_keys) # Check if unexpected helpers are not included in helpers probes # result. for helpers in res["helpers"].values(): for unexpected_helper in unexpected_helpers: self.assertNotIn(unexpected_helper, helpers) def test_feature_kernel(self): test_cases = [ bpftool_json(["feature", "probe", "kernel"]), bpftool_json(["feature", "probe"]), bpftool_json(["feature"]), ] unexpected_helpers = DMESG_EMITTING_HELPERS expected_keys = [ "syscall_config", "system_config", "program_types", "map_types", "helpers", "misc", ] for tc in test_cases: # Check if the result has all expected keys. self.assertCountEqual(tc.keys(), expected_keys) # Check if unexpected helpers are not included in helpers probes # result. for helpers in tc["helpers"].values(): for unexpected_helper in unexpected_helpers: self.assertNotIn(unexpected_helper, helpers) def test_feature_kernel_full(self): test_cases = [ bpftool_json(["feature", "probe", "kernel", "full"]), bpftool_json(["feature", "probe", "full"]), ] expected_helpers = DMESG_EMITTING_HELPERS for tc in test_cases: # Check if expected helpers are included at least once in any # helpers list for any program type. Unfortunately we cannot assume # that they will be included in all program types or a specific # subset of programs. It depends on the kernel version and # configuration. 
found_helpers = False for helpers in tc["helpers"].values(): if all(expected_helper in helpers for expected_helper in expected_helpers): found_helpers = True break self.assertTrue(found_helpers) def test_feature_kernel_full_vs_not_full(self): full_res = bpftool_json(["feature", "probe", "full"]) not_full_res = bpftool_json(["feature", "probe"]) not_full_set = set() full_set = set() for helpers in full_res["helpers"].values(): for helper in helpers: full_set.add(helper) for helpers in not_full_res["helpers"].values(): for helper in helpers: not_full_set.add(helper) self.assertCountEqual(full_set - not_full_set, set(DMESG_EMITTING_HELPERS)) self.assertCountEqual(not_full_set - full_set, set()) def test_feature_macros(self): expected_patterns = [ r"/\*\*\* System call availability \*\*\*/", r"#define HAVE_BPF_SYSCALL", r"/\*\*\* eBPF program types \*\*\*/", r"#define HAVE.*PROG_TYPE", r"/\*\*\* eBPF map types \*\*\*/", r"#define HAVE.*MAP_TYPE", r"/\*\*\* eBPF helper functions \*\*\*/", r"#define HAVE.*HELPER", r"/\*\*\* eBPF misc features \*\*\*/", ] res = bpftool(["feature", "probe", "macros"]) for pattern in expected_patterns: self.assertRegex(res, pattern)
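# ---------------------------------------------------------------------------
# A minimal standalone sketch of the set flattening used by
# test_feature_kernel_full_vs_not_full() above. The helper name
# (flatten_helpers) is hypothetical.
def flatten_helpers(feature_res):
    # feature_res is a parsed `bpftool -j feature probe` result; its
    # "helpers" entry maps program types to lists of helper names, which
    # this collapses into one set for the full-vs-not-full comparison.
    helpers = set()
    for per_prog_type in feature_res.get("helpers", {}).values():
        helpers.update(per_prog_type)
    return helpers

# flatten_helpers(full_res) - flatten_helpers(not_full_res) is expected to
# equal set(DMESG_EMITTING_HELPERS), matching the assertion in the test.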
grace-kernel-grace-kernel-6.1.y
tools/testing/selftests/bpf/test_bpftool.py
#!/usr/bin/env python3 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) # # Copyright (C) 2021 Isovalent, Inc. import argparse import re import os, sys LINUX_ROOT = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir, os.pardir, os.pardir)) BPFTOOL_DIR = os.getenv('BPFTOOL_DIR', os.path.join(LINUX_ROOT, 'tools/bpf/bpftool')) BPFTOOL_BASHCOMP_DIR = os.getenv('BPFTOOL_BASHCOMP_DIR', os.path.join(BPFTOOL_DIR, 'bash-completion')) BPFTOOL_DOC_DIR = os.getenv('BPFTOOL_DOC_DIR', os.path.join(BPFTOOL_DIR, 'Documentation')) INCLUDE_DIR = os.getenv('INCLUDE_DIR', os.path.join(LINUX_ROOT, 'tools/include')) retval = 0 class BlockParser(object): """ A parser for extracting set of values from blocks such as enums. @reader: a pointer to the open file to parse """ def __init__(self, reader): self.reader = reader def search_block(self, start_marker): """ Search for a given structure in a file. @start_marker: regex marking the beginning of a structure to parse """ offset = self.reader.tell() array_start = re.search(start_marker, self.reader.read()) if array_start is None: raise Exception('Failed to find start of block') self.reader.seek(offset + array_start.start()) def parse(self, pattern, end_marker): """ Parse a block and return a set of values. Values to extract must be on separate lines in the file. @pattern: pattern used to identify the values to extract @end_marker: regex marking the end of the block to parse """ entries = set() while True: line = self.reader.readline() if not line or re.match(end_marker, line): break capture = pattern.search(line) if capture and pattern.groups >= 1: entries.add(capture.group(1)) return entries class ArrayParser(BlockParser): """ A parser for extracting a set of values from some BPF-related arrays. @reader: a pointer to the open file to parse @array_name: name of the array to parse """ end_marker = re.compile('^};') def __init__(self, reader, array_name): self.array_name = array_name self.start_marker = re.compile(f'(static )?const bool {self.array_name}\[.*\] = {{\n') super().__init__(reader) def search_block(self): """ Search for the given array in a file. """ super().search_block(self.start_marker); def parse(self): """ Parse a block and return data as a dictionary. Items to extract must be on separate lines in the file. """ pattern = re.compile('\[(BPF_\w*)\]\s*= (true|false),?$') entries = set() while True: line = self.reader.readline() if line == '' or re.match(self.end_marker, line): break capture = pattern.search(line) if capture: entries |= {capture.group(1)} return entries class InlineListParser(BlockParser): """ A parser for extracting set of values from inline lists. """ def parse(self, pattern, end_marker): """ Parse a block and return a set of values. Multiple values to extract can be on a same line in the file. @pattern: pattern used to identify the values to extract @end_marker: regex marking the end of the block to parse """ entries = set() while True: line = self.reader.readline() if not line: break entries.update(pattern.findall(line)) if re.search(end_marker, line): break return entries class FileExtractor(object): """ A generic reader for extracting data from a given file. This class contains several helper methods that wrap around parser objects to extract values from different structures. This class does not offer a way to set a filename, which is expected to be defined in children classes. """ def __init__(self): self.reader = open(self.filename, 'r') def close(self): """ Close the file used by the parser. 
""" self.reader.close() def reset_read(self): """ Reset the file position indicator for this parser. This is useful when parsing several structures in the file without respecting the order in which those structures appear in the file. """ self.reader.seek(0) def get_types_from_array(self, array_name): """ Search for and parse a list of allowed BPF_* enum members, for example: const bool prog_type_name[] = { [BPF_PROG_TYPE_UNSPEC] = true, [BPF_PROG_TYPE_SOCKET_FILTER] = true, [BPF_PROG_TYPE_KPROBE] = true, }; Return a set of the enum members, for example: {'BPF_PROG_TYPE_UNSPEC', 'BPF_PROG_TYPE_SOCKET_FILTER', 'BPF_PROG_TYPE_KPROBE'} @array_name: name of the array to parse """ array_parser = ArrayParser(self.reader, array_name) array_parser.search_block() return array_parser.parse() def get_enum(self, enum_name): """ Search for and parse an enum containing BPF_* members, for example: enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, BPF_PROG_TYPE_KPROBE, }; Return a set containing all member names, for example: {'BPF_PROG_TYPE_UNSPEC', 'BPF_PROG_TYPE_SOCKET_FILTER', 'BPF_PROG_TYPE_KPROBE'} @enum_name: name of the enum to parse """ start_marker = re.compile(f'enum {enum_name} {{\n') pattern = re.compile('^\s*(BPF_\w+),?(\s+/\*.*\*/)?$') end_marker = re.compile('^};') parser = BlockParser(self.reader) parser.search_block(start_marker) return parser.parse(pattern, end_marker) def make_enum_map(self, names, enum_prefix): """ Search for and parse an enum containing BPF_* members, just as get_enum does. However, instead of just returning a set of the variant names, also generate a textual representation from them by (assuming and) removing a provided prefix and lowercasing the remainder. Then return a dict mapping from name to textual representation. 
        @names: a set of enum member names; e.g., as retrieved by get_enum
        @enum_prefix: the prefix to remove from each of the variants to infer
        textual representation
        """
        mapping = {}
        for name in names:
            if not name.startswith(enum_prefix):
                raise Exception(f"enum variant {name} does not start with {enum_prefix}")
            text = name[len(enum_prefix):].lower()
            mapping[name] = text
        return mapping

    def __get_description_list(self, start_marker, pattern, end_marker):
        parser = InlineListParser(self.reader)
        parser.search_block(start_marker)
        return parser.parse(pattern, end_marker)

    def get_rst_list(self, block_name):
        """
        Search for and parse a list of type names from RST documentation, for
        example:

            | *TYPE* := {
            | **socket** | **kprobe** |
            | **kretprobe**
            | }

        Return a set containing all type names, for example:

            {'socket', 'kprobe', 'kretprobe'}

        @block_name: name of the block to parse, 'TYPE' in the example
        """
        start_marker = re.compile(f'\*{block_name}\* := {{')
        pattern = re.compile('\*\*([\w/-]+)\*\*')
        end_marker = re.compile('}\n')
        return self.__get_description_list(start_marker, pattern, end_marker)

    def get_help_list(self, block_name):
        """
        Search for and parse a list of type names from a help message in
        bpftool, for example:

            "        TYPE := { socket | kprobe |\\n"
            "                kretprobe }\\n"

        Return a set containing all type names, for example:

            {'socket', 'kprobe', 'kretprobe'}

        @block_name: name of the block to parse, 'TYPE' in the example
        """
        start_marker = re.compile(f'"\s*{block_name} := {{')
        pattern = re.compile('([\w/]+) [|}]')
        end_marker = re.compile('}')
        return self.__get_description_list(start_marker, pattern, end_marker)

    def get_help_list_macro(self, macro):
        """
        Search for and parse a list of values from a help message starting
        with a macro in bpftool, for example:

            "        " HELP_SPEC_OPTIONS " |\\n"
            "        {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} }\\n"

        Return a set containing all item names, for example:

            {'-f', '--bpffs', '-m', '--mapcompat', '-n', '--nomount'}

        @macro: macro starting the block, 'HELP_SPEC_OPTIONS' in the example
        """
        start_marker = re.compile(f'"\s*{macro}\s*" [|}}]')
        pattern = re.compile('([\w-]+) ?(?:\||}[ }\]])')
        end_marker = re.compile('}\\\\n')
        return self.__get_description_list(start_marker, pattern, end_marker)

    def get_bashcomp_list(self, block_name):
        """
        Search for and parse a list of type names from a variable in the bash
        completion file, for example:

            local BPFTOOL_PROG_LOAD_TYPES='socket kprobe \\
                kretprobe'

        Return a set containing all type names, for example:

            {'socket', 'kprobe', 'kretprobe'}

        @block_name: name of the variable to parse,
            'BPFTOOL_PROG_LOAD_TYPES' in the example
        """
        start_marker = re.compile(f'local {block_name}=\'')
        pattern = re.compile('(?:.*=\')?([\w/]+)')
        end_marker = re.compile('\'$')
        return self.__get_description_list(start_marker, pattern, end_marker)

class SourceFileExtractor(FileExtractor):
    """
    An abstract extractor for a source file with usage message.
    This class does not offer a way to set a filename, which is expected to be
    defined in children classes.
""" def get_options(self): return self.get_help_list_macro('HELP_SPEC_OPTIONS') class MainHeaderFileExtractor(SourceFileExtractor): """ An extractor for bpftool's main.h """ filename = os.path.join(BPFTOOL_DIR, 'main.h') def get_common_options(self): """ Parse the list of common options in main.h (options that apply to all commands), which looks to the lists of options in other source files but has different start and end markers: "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}" Return a set containing all options, such as: {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'} """ start_marker = re.compile(f'"OPTIONS :=') pattern = re.compile('([\w-]+) ?(?:\||}[ }\]"])') end_marker = re.compile('#define') parser = InlineListParser(self.reader) parser.search_block(start_marker) return parser.parse(pattern, end_marker) class ManSubstitutionsExtractor(SourceFileExtractor): """ An extractor for substitutions.rst """ filename = os.path.join(BPFTOOL_DOC_DIR, 'substitutions.rst') def get_common_options(self): """ Parse the list of common options in substitutions.rst (options that apply to all commands). Return a set containing all options, such as: {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'} """ start_marker = re.compile('\|COMMON_OPTIONS\| replace:: {') pattern = re.compile('\*\*([\w/-]+)\*\*') end_marker = re.compile('}$') parser = InlineListParser(self.reader) parser.search_block(start_marker) return parser.parse(pattern, end_marker) class ProgFileExtractor(SourceFileExtractor): """ An extractor for bpftool's prog.c. """ filename = os.path.join(BPFTOOL_DIR, 'prog.c') def get_attach_types(self): types = self.get_types_from_array('attach_types') return self.make_enum_map(types, 'BPF_') def get_prog_attach_help(self): return self.get_help_list('ATTACH_TYPE') class MapFileExtractor(SourceFileExtractor): """ An extractor for bpftool's map.c. """ filename = os.path.join(BPFTOOL_DIR, 'map.c') def get_map_help(self): return self.get_help_list('TYPE') class CgroupFileExtractor(SourceFileExtractor): """ An extractor for bpftool's cgroup.c. """ filename = os.path.join(BPFTOOL_DIR, 'cgroup.c') def get_prog_attach_help(self): return self.get_help_list('ATTACH_TYPE') class GenericSourceExtractor(SourceFileExtractor): """ An extractor for generic source code files. """ filename = "" def __init__(self, filename): self.filename = os.path.join(BPFTOOL_DIR, filename) super().__init__() class BpfHeaderExtractor(FileExtractor): """ An extractor for the UAPI BPF header. """ filename = os.path.join(INCLUDE_DIR, 'uapi/linux/bpf.h') def __init__(self): super().__init__() self.attach_types = {} def get_prog_types(self): return self.get_enum('bpf_prog_type') def get_map_type_map(self): names = self.get_enum('bpf_map_type') return self.make_enum_map(names, 'BPF_MAP_TYPE_') def get_attach_type_map(self): if not self.attach_types: names = self.get_enum('bpf_attach_type') self.attach_types = self.make_enum_map(names, 'BPF_') return self.attach_types def get_cgroup_attach_type_map(self): if not self.attach_types: self.get_attach_type_map() return {name: text for name, text in self.attach_types.items() if name.startswith('BPF_CGROUP')} class ManPageExtractor(FileExtractor): """ An abstract extractor for an RST documentation page. This class does not offer a way to set a filename, which is expected to be defined in children classes. 
""" def get_options(self): return self.get_rst_list('OPTIONS') class ManProgExtractor(ManPageExtractor): """ An extractor for bpftool-prog.rst. """ filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-prog.rst') def get_attach_types(self): return self.get_rst_list('ATTACH_TYPE') class ManMapExtractor(ManPageExtractor): """ An extractor for bpftool-map.rst. """ filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-map.rst') def get_map_types(self): return self.get_rst_list('TYPE') class ManCgroupExtractor(ManPageExtractor): """ An extractor for bpftool-cgroup.rst. """ filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-cgroup.rst') def get_attach_types(self): return self.get_rst_list('ATTACH_TYPE') class ManGenericExtractor(ManPageExtractor): """ An extractor for generic RST documentation pages. """ filename = "" def __init__(self, filename): self.filename = os.path.join(BPFTOOL_DIR, filename) super().__init__() class BashcompExtractor(FileExtractor): """ An extractor for bpftool's bash completion file. """ filename = os.path.join(BPFTOOL_BASHCOMP_DIR, 'bpftool') def get_prog_attach_types(self): return self.get_bashcomp_list('BPFTOOL_PROG_ATTACH_TYPES') def verify(first_set, second_set, message): """ Print all values that differ between two sets. @first_set: one set to compare @second_set: another set to compare @message: message to print for values belonging to only one of the sets """ global retval diff = first_set.symmetric_difference(second_set) if diff: print(message, diff) retval = 1 def main(): # No arguments supported at this time, but print usage for -h|--help argParser = argparse.ArgumentParser(description=""" Verify that bpftool's code, help messages, documentation and bash completion are all in sync on program types, map types, attach types, and options. Also check that bpftool is in sync with the UAPI BPF header. 
""") args = argParser.parse_args() bpf_info = BpfHeaderExtractor() # Map types (names) map_info = MapFileExtractor() source_map_types = set(bpf_info.get_map_type_map().values()) source_map_types.discard('unspec') help_map_types = map_info.get_map_help() help_map_options = map_info.get_options() map_info.close() man_map_info = ManMapExtractor() man_map_options = man_map_info.get_options() man_map_types = man_map_info.get_map_types() man_map_info.close() verify(source_map_types, help_map_types, f'Comparing {BpfHeaderExtractor.filename} (bpf_map_type) and {MapFileExtractor.filename} (do_help() TYPE):') verify(source_map_types, man_map_types, f'Comparing {BpfHeaderExtractor.filename} (bpf_map_type) and {ManMapExtractor.filename} (TYPE):') verify(help_map_options, man_map_options, f'Comparing {MapFileExtractor.filename} (do_help() OPTIONS) and {ManMapExtractor.filename} (OPTIONS):') # Attach types (names) prog_info = ProgFileExtractor() source_prog_attach_types = set(prog_info.get_attach_types().values()) help_prog_attach_types = prog_info.get_prog_attach_help() help_prog_options = prog_info.get_options() prog_info.close() man_prog_info = ManProgExtractor() man_prog_options = man_prog_info.get_options() man_prog_attach_types = man_prog_info.get_attach_types() man_prog_info.close() bashcomp_info = BashcompExtractor() bashcomp_prog_attach_types = bashcomp_info.get_prog_attach_types() bashcomp_info.close() verify(source_prog_attach_types, help_prog_attach_types, f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {ProgFileExtractor.filename} (do_help() ATTACH_TYPE):') verify(source_prog_attach_types, man_prog_attach_types, f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {ManProgExtractor.filename} (ATTACH_TYPE):') verify(help_prog_options, man_prog_options, f'Comparing {ProgFileExtractor.filename} (do_help() OPTIONS) and {ManProgExtractor.filename} (OPTIONS):') verify(source_prog_attach_types, bashcomp_prog_attach_types, f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {BashcompExtractor.filename} (BPFTOOL_PROG_ATTACH_TYPES):') # Cgroup attach types source_cgroup_attach_types = set(bpf_info.get_cgroup_attach_type_map().values()) bpf_info.close() cgroup_info = CgroupFileExtractor() help_cgroup_attach_types = cgroup_info.get_prog_attach_help() help_cgroup_options = cgroup_info.get_options() cgroup_info.close() man_cgroup_info = ManCgroupExtractor() man_cgroup_options = man_cgroup_info.get_options() man_cgroup_attach_types = man_cgroup_info.get_attach_types() man_cgroup_info.close() verify(source_cgroup_attach_types, help_cgroup_attach_types, f'Comparing {BpfHeaderExtractor.filename} (bpf_attach_type) and {CgroupFileExtractor.filename} (do_help() ATTACH_TYPE):') verify(source_cgroup_attach_types, man_cgroup_attach_types, f'Comparing {BpfHeaderExtractor.filename} (bpf_attach_type) and {ManCgroupExtractor.filename} (ATTACH_TYPE):') verify(help_cgroup_options, man_cgroup_options, f'Comparing {CgroupFileExtractor.filename} (do_help() OPTIONS) and {ManCgroupExtractor.filename} (OPTIONS):') # Options for remaining commands for cmd in [ 'btf', 'feature', 'gen', 'iter', 'link', 'net', 'perf', 'struct_ops', ]: source_info = GenericSourceExtractor(cmd + '.c') help_cmd_options = source_info.get_options() source_info.close() man_cmd_info = ManGenericExtractor(os.path.join(BPFTOOL_DOC_DIR, 'bpftool-' + cmd + '.rst')) man_cmd_options = man_cmd_info.get_options() man_cmd_info.close() verify(help_cmd_options, man_cmd_options, f'Comparing {source_info.filename} (do_help() 
OPTIONS) and {man_cmd_info.filename} (OPTIONS):') source_main_info = GenericSourceExtractor('main.c') help_main_options = source_main_info.get_options() source_main_info.close() man_main_info = ManGenericExtractor(os.path.join(BPFTOOL_DOC_DIR, 'bpftool.rst')) man_main_options = man_main_info.get_options() man_main_info.close() verify(help_main_options, man_main_options, f'Comparing {source_main_info.filename} (do_help() OPTIONS) and {man_main_info.filename} (OPTIONS):') # Compare common options (options that apply to all commands) main_hdr_info = MainHeaderFileExtractor() source_common_options = main_hdr_info.get_common_options() main_hdr_info.close() man_substitutions = ManSubstitutionsExtractor() man_common_options = man_substitutions.get_common_options() man_substitutions.close() verify(source_common_options, man_common_options, f'Comparing common options from {main_hdr_info.filename} (HELP_SPEC_OPTIONS) and {man_substitutions.filename}:') sys.exit(retval) if __name__ == "__main__": main()
grace-kernel-grace-kernel-6.1.y
tools/testing/selftests/bpf/test_bpftool_synctypes.py
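At its core, the checker above reduces every source of truth (UAPI enum, help text, man page, bash completion) to a set of strings and flags anything that appears in only one of them. A minimal, self-contained sketch of that comparison idea follows; the set contents are invented, not real bpftool type names:

# Toy illustration of the symmetric-difference check that verify() above
# performs; the values here are made up for demonstration purposes.
source_types = {'hash', 'array', 'prog_array'}
documented_types = {'hash', 'array', 'ringbuf'}

diff = source_types.symmetric_difference(documented_types)
if diff:
    # verify() prints its message plus the stray values, and records the
    # mismatch in the script's global `retval` so main() exits non-zero.
    print('Types present in only one location:', sorted(diff))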
# SPDX-License-Identifier: GPL-2.0 # # Collection of configs for building non-UML kernels and running them on QEMU. # # Copyright (C) 2021, Google LLC. # Author: Brendan Higgins <[email protected]> from dataclasses import dataclass from typing import List @dataclass(frozen=True) class QemuArchParams: linux_arch: str kconfig: str qemu_arch: str kernel_path: str kernel_command_line: str extra_qemu_params: List[str]
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_config.py
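kunit.py (a later record in this dump) accepts a --qemu_config flag whose help text says it takes a path to a file containing a QemuArchParams object. A hypothetical config file of that shape is sketched below; the variable name QEMU_ARCH and every field value are illustrative assumptions, not copied from the kernel tree:

# Hypothetical QEMU config file for kunit.py's --qemu_config option; all
# values below are invented for illustration.
from qemu_config import QemuArchParams

QEMU_ARCH = QemuArchParams(
    linux_arch='x86_64',
    kconfig='''
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y''',
    qemu_arch='x86_64',
    kernel_path='arch/x86/boot/bzImage',
    kernel_command_line='console=ttyS0',
    extra_qemu_params=['-smp', '2'])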
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# A thin wrapper on top of the KUnit Kernel
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <[email protected]>
# Author: Brendan Higgins <[email protected]>

import argparse
import os
import re
import shlex
import sys
import time

assert sys.version_info >= (3, 7), "Python version is too old"

from dataclasses import dataclass
from enum import Enum, auto
from typing import Iterable, List, Optional, Sequence, Tuple

import kunit_json
import kunit_kernel
import kunit_parser
from kunit_printer import stdout

class KunitStatus(Enum):
	SUCCESS = auto()
	CONFIG_FAILURE = auto()
	BUILD_FAILURE = auto()
	TEST_FAILURE = auto()

@dataclass
class KunitResult:
	status: KunitStatus
	elapsed_time: float

@dataclass
class KunitConfigRequest:
	build_dir: str
	make_options: Optional[List[str]]

@dataclass
class KunitBuildRequest(KunitConfigRequest):
	jobs: int

@dataclass
class KunitParseRequest:
	raw_output: Optional[str]
	json: Optional[str]

@dataclass
class KunitExecRequest(KunitParseRequest):
	build_dir: str
	timeout: int
	filter_glob: str
	kernel_args: Optional[List[str]]
	run_isolated: Optional[str]

@dataclass
class KunitRequest(KunitExecRequest, KunitBuildRequest):
	pass

def get_kernel_root_path() -> str:
	path = sys.argv[0] if not __file__ else __file__
	parts = os.path.realpath(path).split('tools/testing/kunit')
	if len(parts) != 2:
		sys.exit(1)
	return parts[0]

def config_tests(linux: kunit_kernel.LinuxSourceTree,
		 request: KunitConfigRequest) -> KunitResult:
	stdout.print_with_timestamp('Configuring KUnit Kernel ...')

	config_start = time.time()
	success = linux.build_reconfig(request.build_dir, request.make_options)
	config_end = time.time()
	if not success:
		return KunitResult(KunitStatus.CONFIG_FAILURE,
				   config_end - config_start)
	return KunitResult(KunitStatus.SUCCESS,
			   config_end - config_start)

def build_tests(linux: kunit_kernel.LinuxSourceTree,
		request: KunitBuildRequest) -> KunitResult:
	stdout.print_with_timestamp('Building KUnit Kernel ...')

	build_start = time.time()
	success = linux.build_kernel(request.jobs,
				     request.build_dir,
				     request.make_options)
	build_end = time.time()
	if not success:
		return KunitResult(KunitStatus.BUILD_FAILURE,
				   build_end - build_start)
	return KunitResult(KunitStatus.SUCCESS,
			   build_end - build_start)

def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
			   request: KunitBuildRequest) -> KunitResult:
	config_result = config_tests(linux, request)
	if config_result.status != KunitStatus.SUCCESS:
		return config_result

	return build_tests(linux, request)

def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
	args = ['kunit.action=list']
	if request.kernel_args:
		args.extend(request.kernel_args)

	output = linux.run_kernel(args=args,
				  timeout=request.timeout,
				  filter_glob=request.filter_glob,
				  build_dir=request.build_dir)
	lines = kunit_parser.extract_tap_lines(output)
	# Hack! Drop the dummy TAP version header that the executor prints out.
	lines.pop()

	# Filter out any extraneous non-test output that might have gotten mixed in.
return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)] def _suites_from_test_list(tests: List[str]) -> List[str]: """Extracts all the suites from an ordered list of tests.""" suites = [] # type: List[str] for t in tests: parts = t.split('.', maxsplit=2) if len(parts) != 2: raise ValueError(f'internal KUnit error, test name should be of the form "<suite>.<test>", got "{t}"') suite, case = parts if not suites or suites[-1] != suite: suites.append(suite) return suites def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult: filter_globs = [request.filter_glob] if request.run_isolated: tests = _list_tests(linux, request) if request.run_isolated == 'test': filter_globs = tests if request.run_isolated == 'suite': filter_globs = _suites_from_test_list(tests) # Apply the test-part of the user's glob, if present. if '.' in request.filter_glob: test_glob = request.filter_glob.split('.', maxsplit=2)[1] filter_globs = [g + '.'+ test_glob for g in filter_globs] metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig') test_counts = kunit_parser.TestCounts() exec_time = 0.0 for i, filter_glob in enumerate(filter_globs): stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs))) test_start = time.time() run_result = linux.run_kernel( args=request.kernel_args, timeout=request.timeout, filter_glob=filter_glob, build_dir=request.build_dir) _, test_result = parse_tests(request, metadata, run_result) # run_kernel() doesn't block on the kernel exiting. # That only happens after we get the last line of output from `run_result`. # So exec_time here actually contains parsing + execution time, which is fine. test_end = time.time() exec_time += test_end - test_start test_counts.add_subtest_counts(test_result.counts) if len(filter_globs) == 1 and test_counts.crashed > 0: bd = request.build_dir print('The kernel seems to have crashed; you can decode the stack traces with:') print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format( bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0])) kunit_status = _map_to_overall_status(test_counts.get_status()) return KunitResult(status=kunit_status, elapsed_time=exec_time) def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus: if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED): return KunitStatus.SUCCESS return KunitStatus.TEST_FAILURE def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]: parse_start = time.time() test_result = kunit_parser.Test() if request.raw_output: # Treat unparsed results as one passing test. 
test_result.status = kunit_parser.TestStatus.SUCCESS test_result.counts.passed = 1 output: Iterable[str] = input_data if request.raw_output == 'all': pass elif request.raw_output == 'kunit': output = kunit_parser.extract_tap_lines(output, lstrip=False) for line in output: print(line.rstrip()) else: test_result = kunit_parser.parse_run_tests(input_data) parse_end = time.time() if request.json: json_str = kunit_json.get_json_result( test=test_result, metadata=metadata) if request.json == 'stdout': print(json_str) else: with open(request.json, 'w') as f: f.write(json_str) stdout.print_with_timestamp("Test results stored in %s" % os.path.abspath(request.json)) if test_result.status != kunit_parser.TestStatus.SUCCESS: return KunitResult(KunitStatus.TEST_FAILURE, parse_end - parse_start), test_result return KunitResult(KunitStatus.SUCCESS, parse_end - parse_start), test_result def run_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitRequest) -> KunitResult: run_start = time.time() config_result = config_tests(linux, request) if config_result.status != KunitStatus.SUCCESS: return config_result build_result = build_tests(linux, request) if build_result.status != KunitStatus.SUCCESS: return build_result exec_result = exec_tests(linux, request) run_end = time.time() stdout.print_with_timestamp(( 'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' + 'building, %.3fs running\n') % ( run_end - run_start, config_result.elapsed_time, build_result.elapsed_time, exec_result.elapsed_time)) return exec_result # Problem: # $ kunit.py run --json # works as one would expect and prints the parsed test results as JSON. # $ kunit.py run --json suite_name # would *not* pass suite_name as the filter_glob and print as json. # argparse will consider it to be another way of writing # $ kunit.py run --json=suite_name # i.e. it would run all tests, and dump the json to a `suite_name` file. # So we hackily automatically rewrite --json => --json=stdout pseudo_bool_flag_defaults = { '--json': 'stdout', '--raw_output': 'kunit', } def massage_argv(argv: Sequence[str]) -> Sequence[str]: def massage_arg(arg: str) -> str: if arg not in pseudo_bool_flag_defaults: return arg return f'{arg}={pseudo_bool_flag_defaults[arg]}' return list(map(massage_arg, argv)) def get_default_jobs() -> int: return len(os.sched_getaffinity(0)) def add_common_opts(parser) -> None: parser.add_argument('--build_dir', help='As in the make command, it specifies the build ' 'directory.', type=str, default='.kunit', metavar='DIR') parser.add_argument('--make_options', help='X=Y make option, can be repeated.', action='append', metavar='X=Y') parser.add_argument('--alltests', help='Run all KUnit tests via tools/testing/kunit/configs/all_tests.config', action='store_true') parser.add_argument('--kunitconfig', help='Path to Kconfig fragment that enables KUnit tests.' ' If given a directory, (e.g. lib/kunit), "/.kunitconfig" ' 'will get automatically appended. If repeated, the files ' 'blindly concatenated, which might not work in all cases.', action='append', metavar='PATHS') parser.add_argument('--kconfig_add', help='Additional Kconfig options to append to the ' '.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.', action='append', metavar='CONFIG_X=Y') parser.add_argument('--arch', help=('Specifies the architecture to run tests under. ' 'The architecture specified here must match the ' 'string passed to the ARCH make param, ' 'e.g. i386, x86_64, arm, um, etc. 
Non-UML '
				  'architectures run on QEMU.'),
			    type=str, default='um', metavar='ARCH')
	parser.add_argument('--cross_compile',
			    help=('Sets make\'s CROSS_COMPILE variable; it should '
				  'be set to a toolchain path prefix (the prefix '
				  'of gcc and other tools in your toolchain, for '
				  'example `sparc64-linux-gnu-` if you have the '
				  'sparc toolchain installed on your system, or '
				  '`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` '
				  'if you have downloaded the microblaze toolchain '
				  'from the 0-day website to a directory in your '
				  'home directory called `toolchains`).'),
			    metavar='PREFIX')
	parser.add_argument('--qemu_config',
			    help=('Takes a path to a file containing '
				  'a QemuArchParams object.'),
			    type=str, metavar='FILE')
	parser.add_argument('--qemu_args',
			    help='Additional QEMU arguments, e.g. "-smp 8"',
			    action='append', metavar='')

def add_build_opts(parser) -> None:
	parser.add_argument('--jobs',
			    help='As in the make command, "Specifies the number of '
			    'jobs (commands) to run simultaneously."',
			    type=int, default=get_default_jobs(), metavar='N')

def add_exec_opts(parser) -> None:
	parser.add_argument('--timeout',
			    help='maximum number of seconds to allow for all tests '
			    'to run. This does not include time taken to build the '
			    'tests.',
			    type=int,
			    default=300,
			    metavar='SECONDS')
	parser.add_argument('filter_glob',
			    help='Filter which KUnit test suites/tests run at '
			    'boot-time, e.g. list* or list*.*del_test',
			    type=str,
			    nargs='?',
			    default='',
			    metavar='filter_glob')
	parser.add_argument('--kernel_args',
			    help='Kernel command-line parameters. May be repeated.',
			    action='append', metavar='')
	parser.add_argument('--run_isolated',
			    help='If set, boot the kernel for each '
			    'individual suite/test. This can be useful for debugging '
			    'a non-hermetic test, one that might pass/fail based on '
			    'what ran before it.',
			    type=str,
			    choices=['suite', 'test'])

def add_parse_opts(parser) -> None:
	parser.add_argument('--raw_output',
			    help='If set don\'t format output from kernel. '
			    'If set to --raw_output=kunit, filters to just KUnit output.',
			    type=str, nargs='?', const='all', default=None, choices=['all', 'kunit'])
	parser.add_argument('--json',
			    nargs='?',
			    help='Stores test results in a JSON, and either '
			    'prints to stdout or saves to file if a '
			    'filename is specified',
			    type=str, const='stdout', default=None, metavar='FILE')

def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree:
	"""Returns a LinuxSourceTree based on the user's arguments."""
	# Allow users to specify multiple arguments in one string, e.g. '-smp 8'
	qemu_args: List[str] = []
	if cli_args.qemu_args:
		for arg in cli_args.qemu_args:
			qemu_args.extend(shlex.split(arg))

	kunitconfigs = cli_args.kunitconfig if cli_args.kunitconfig else []
	if cli_args.alltests:
		# Prepend so user-specified options take prio if we ever allow
		# --kunitconfig options to have differing options.
		kunitconfigs = [kunit_kernel.ALL_TESTS_CONFIG_PATH] + kunitconfigs

	return kunit_kernel.LinuxSourceTree(cli_args.build_dir,
			kunitconfig_paths=kunitconfigs,
			kconfig_add=cli_args.kconfig_add,
			arch=cli_args.arch,
			cross_compile=cli_args.cross_compile,
			qemu_config_path=cli_args.qemu_config,
			extra_qemu_args=qemu_args)

def main(argv):
	parser = argparse.ArgumentParser(
			description='Helps writing and running KUnit tests.')
	subparser = parser.add_subparsers(dest='subcommand')

	# The 'run' command will config, build, exec, and parse in one go.
run_parser = subparser.add_parser('run', help='Runs KUnit tests.') add_common_opts(run_parser) add_build_opts(run_parser) add_exec_opts(run_parser) add_parse_opts(run_parser) config_parser = subparser.add_parser('config', help='Ensures that .config contains all of ' 'the options in .kunitconfig') add_common_opts(config_parser) build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests') add_common_opts(build_parser) add_build_opts(build_parser) exec_parser = subparser.add_parser('exec', help='Run a kernel with KUnit tests') add_common_opts(exec_parser) add_exec_opts(exec_parser) add_parse_opts(exec_parser) # The 'parse' option is special, as it doesn't need the kernel source # (therefore there is no need for a build_dir, hence no add_common_opts) # and the '--file' argument is not relevant to 'run', so isn't in # add_parse_opts() parse_parser = subparser.add_parser('parse', help='Parses KUnit results from a file, ' 'and parses formatted results.') add_parse_opts(parse_parser) parse_parser.add_argument('file', help='Specifies the file to read results from.', type=str, nargs='?', metavar='input_file') cli_args = parser.parse_args(massage_argv(argv)) if get_kernel_root_path(): os.chdir(get_kernel_root_path()) if cli_args.subcommand == 'run': if not os.path.exists(cli_args.build_dir): os.mkdir(cli_args.build_dir) linux = tree_from_args(cli_args) request = KunitRequest(build_dir=cli_args.build_dir, make_options=cli_args.make_options, jobs=cli_args.jobs, raw_output=cli_args.raw_output, json=cli_args.json, timeout=cli_args.timeout, filter_glob=cli_args.filter_glob, kernel_args=cli_args.kernel_args, run_isolated=cli_args.run_isolated) result = run_tests(linux, request) if result.status != KunitStatus.SUCCESS: sys.exit(1) elif cli_args.subcommand == 'config': if cli_args.build_dir and ( not os.path.exists(cli_args.build_dir)): os.mkdir(cli_args.build_dir) linux = tree_from_args(cli_args) request = KunitConfigRequest(build_dir=cli_args.build_dir, make_options=cli_args.make_options) result = config_tests(linux, request) stdout.print_with_timestamp(( 'Elapsed time: %.3fs\n') % ( result.elapsed_time)) if result.status != KunitStatus.SUCCESS: sys.exit(1) elif cli_args.subcommand == 'build': linux = tree_from_args(cli_args) request = KunitBuildRequest(build_dir=cli_args.build_dir, make_options=cli_args.make_options, jobs=cli_args.jobs) result = config_and_build_tests(linux, request) stdout.print_with_timestamp(( 'Elapsed time: %.3fs\n') % ( result.elapsed_time)) if result.status != KunitStatus.SUCCESS: sys.exit(1) elif cli_args.subcommand == 'exec': linux = tree_from_args(cli_args) exec_request = KunitExecRequest(raw_output=cli_args.raw_output, build_dir=cli_args.build_dir, json=cli_args.json, timeout=cli_args.timeout, filter_glob=cli_args.filter_glob, kernel_args=cli_args.kernel_args, run_isolated=cli_args.run_isolated) result = exec_tests(linux, exec_request) stdout.print_with_timestamp(( 'Elapsed time: %.3fs\n') % (result.elapsed_time)) if result.status != KunitStatus.SUCCESS: sys.exit(1) elif cli_args.subcommand == 'parse': if cli_args.file is None: sys.stdin.reconfigure(errors='backslashreplace') # pytype: disable=attribute-error kunit_output = sys.stdin else: with open(cli_args.file, 'r', errors='backslashreplace') as f: kunit_output = f.read().splitlines() # We know nothing about how the result was created! 
metadata = kunit_json.Metadata() request = KunitParseRequest(raw_output=cli_args.raw_output, json=cli_args.json) result, _ = parse_tests(request, metadata, kunit_output) if result.status != KunitStatus.SUCCESS: sys.exit(1) else: parser.print_help() if __name__ == '__main__': main(sys.argv[1:])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/kunit.py
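Of the helpers in kunit.py above, massage_argv() is a pure function, so its pseudo-boolean flag rewriting is easy to check in isolation. A small sketch, assuming it runs from tools/testing/kunit so that the kunit module and its sibling imports resolve:

# Demonstrates the flag rewriting done by massage_argv(): a bare flag is
# expanded to its explicit default so argparse does not consume the next
# positional argument as the flag's value.
import kunit

assert kunit.massage_argv(['run', '--json', 'suite*']) == \
    ['run', '--json=stdout', 'suite*']
assert kunit.massage_argv(['run', '--raw_output']) == \
    ['run', '--raw_output=kunit']
# Flags that already carry an explicit value are left untouched.
assert kunit.massage_argv(['run', '--json=out.json']) == \
    ['run', '--json=out.json']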
#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 # # Utilities for printing and coloring output. # # Copyright (C) 2022, Google LLC. # Author: Daniel Latypov <[email protected]> import datetime import sys import typing _RESET = '\033[0;0m' class Printer: """Wraps a file object, providing utilities for coloring output, etc.""" def __init__(self, output: typing.IO): self._output = output self._use_color = output.isatty() def print(self, message: str) -> None: print(message, file=self._output) def print_with_timestamp(self, message: str) -> None: ts = datetime.datetime.now().strftime('%H:%M:%S') self.print(f'[{ts}] {message}') def _color(self, code: str, text: str) -> str: if not self._use_color: return text return code + text + _RESET def red(self, text: str) -> str: return self._color('\033[1;31m', text) def yellow(self, text: str) -> str: return self._color('\033[1;33m', text) def green(self, text: str) -> str: return self._color('\033[1;32m', text) def color_len(self) -> int: """Returns the length of the color escape codes.""" return len(self.red('')) # Provides a default instance that prints to stdout stdout = Printer(sys.stdout)
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/kunit_printer.py
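A short usage sketch for the Printer above. Writing through an io.StringIO, which is never a TTY, makes the no-color fallback guarded by _use_color visible:

import io
from kunit_printer import Printer, stdout

# The module-level `stdout` instance colors only when sys.stdout is a TTY.
stdout.print_with_timestamp(stdout.green('ok') + ' 1 - example')

# A Printer over a StringIO never colors: isatty() is False, so red()
# returns the text unchanged and no escape codes are emitted.
buf = io.StringIO()
plain = Printer(buf)
plain.print(plain.red('not ok 1 - example'))
assert buf.getvalue() == 'not ok 1 - example\n'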
# SPDX-License-Identifier: GPL-2.0 # # Parses KTAP test results from a kernel dmesg log and incrementally prints # results with reader-friendly format. Stores and returns test results in a # Test object. # # Copyright (C) 2019, Google LLC. # Author: Felix Guo <[email protected]> # Author: Brendan Higgins <[email protected]> # Author: Rae Moar <[email protected]> from __future__ import annotations import re import sys from enum import Enum, auto from typing import Iterable, Iterator, List, Optional, Tuple from kunit_printer import stdout class Test: """ A class to represent a test parsed from KTAP results. All KTAP results within a test log are stored in a main Test object as subtests. Attributes: status : TestStatus - status of the test name : str - name of the test expected_count : int - expected number of subtests (0 if single test case and None if unknown expected number of subtests) subtests : List[Test] - list of subtests log : List[str] - log of KTAP lines that correspond to the test counts : TestCounts - counts of the test statuses and errors of subtests or of the test itself if the test is a single test case. """ def __init__(self) -> None: """Creates Test object with default attributes.""" self.status = TestStatus.TEST_CRASHED self.name = '' self.expected_count = 0 # type: Optional[int] self.subtests = [] # type: List[Test] self.log = [] # type: List[str] self.counts = TestCounts() def __str__(self) -> str: """Returns string representation of a Test class object.""" return (f'Test({self.status}, {self.name}, {self.expected_count}, ' f'{self.subtests}, {self.log}, {self.counts})') def __repr__(self) -> str: """Returns string representation of a Test class object.""" return str(self) def add_error(self, error_message: str) -> None: """Records an error that occurred while parsing this test.""" self.counts.errors += 1 stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}') class TestStatus(Enum): """An enumeration class to represent the status of a test.""" SUCCESS = auto() FAILURE = auto() SKIPPED = auto() TEST_CRASHED = auto() NO_TESTS = auto() FAILURE_TO_PARSE_TESTS = auto() class TestCounts: """ Tracks the counts of statuses of all test cases and any errors within a Test. Attributes: passed : int - the number of tests that have passed failed : int - the number of tests that have failed crashed : int - the number of tests that have crashed skipped : int - the number of tests that have skipped errors : int - the number of errors in the test and subtests """ def __init__(self): """Creates TestCounts object with counts of all test statuses and test errors set to 0. """ self.passed = 0 self.failed = 0 self.crashed = 0 self.skipped = 0 self.errors = 0 def __str__(self) -> str: """Returns the string representation of a TestCounts object.""" statuses = [('passed', self.passed), ('failed', self.failed), ('crashed', self.crashed), ('skipped', self.skipped), ('errors', self.errors)] return f'Ran {self.total()} tests: ' + \ ', '.join(f'{s}: {n}' for s, n in statuses if n > 0) def total(self) -> int: """Returns the total number of test cases within a test object, where a test case is a test with no subtests. """ return (self.passed + self.failed + self.crashed + self.skipped) def add_subtest_counts(self, counts: TestCounts) -> None: """ Adds the counts of another TestCounts object to the current TestCounts object. Used to add the counts of a subtest to the parent test. 
Parameters: counts - a different TestCounts object whose counts will be added to the counts of the TestCounts object """ self.passed += counts.passed self.failed += counts.failed self.crashed += counts.crashed self.skipped += counts.skipped self.errors += counts.errors def get_status(self) -> TestStatus: """Returns the aggregated status of a Test using test counts. """ if self.total() == 0: return TestStatus.NO_TESTS if self.crashed: # Crashes should take priority. return TestStatus.TEST_CRASHED if self.failed: return TestStatus.FAILURE if self.passed: # No failures or crashes, looks good! return TestStatus.SUCCESS # We have only skipped tests. return TestStatus.SKIPPED def add_status(self, status: TestStatus) -> None: """Increments the count for `status`.""" if status == TestStatus.SUCCESS: self.passed += 1 elif status == TestStatus.FAILURE: self.failed += 1 elif status == TestStatus.SKIPPED: self.skipped += 1 elif status != TestStatus.NO_TESTS: self.crashed += 1 class LineStream: """ A class to represent the lines of kernel output. Provides a lazy peek()/pop() interface over an iterator of (line#, text). """ _lines: Iterator[Tuple[int, str]] _next: Tuple[int, str] _need_next: bool _done: bool def __init__(self, lines: Iterator[Tuple[int, str]]): """Creates a new LineStream that wraps the given iterator.""" self._lines = lines self._done = False self._need_next = True self._next = (0, '') def _get_next(self) -> None: """Advances the LineSteam to the next line, if necessary.""" if not self._need_next: return try: self._next = next(self._lines) except StopIteration: self._done = True finally: self._need_next = False def peek(self) -> str: """Returns the current line, without advancing the LineStream. """ self._get_next() return self._next[1] def pop(self) -> str: """Returns the current line and advances the LineStream to the next line. """ s = self.peek() if self._done: raise ValueError(f'LineStream: going past EOF, last line was {s}') self._need_next = True return s def __bool__(self) -> bool: """Returns True if stream has more lines.""" self._get_next() return not self._done # Only used by kunit_tool_test.py. def __iter__(self) -> Iterator[str]: """Empties all lines stored in LineStream object into Iterator object and returns the Iterator object. 
""" while bool(self): yield self.pop() def line_number(self) -> int: """Returns the line number of the current line.""" self._get_next() return self._next[0] # Parsing helper methods: KTAP_START = re.compile(r'KTAP version ([0-9]+)$') TAP_START = re.compile(r'TAP version ([0-9]+)$') KTAP_END = re.compile('(List of all partitions:|' 'Kernel panic - not syncing: VFS:|reboot: System halted)') def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream: """Extracts KTAP lines from the kernel output.""" def isolate_ktap_output(kernel_output: Iterable[str]) \ -> Iterator[Tuple[int, str]]: line_num = 0 started = False for line in kernel_output: line_num += 1 line = line.rstrip() # remove trailing \n if not started and KTAP_START.search(line): # start extracting KTAP lines and set prefix # to number of characters before version line prefix_len = len( line.split('KTAP version')[0]) started = True yield line_num, line[prefix_len:] elif not started and TAP_START.search(line): # start extracting KTAP lines and set prefix # to number of characters before version line prefix_len = len(line.split('TAP version')[0]) started = True yield line_num, line[prefix_len:] elif started and KTAP_END.search(line): # stop extracting KTAP lines break elif started: # remove the prefix and optionally any leading # whitespace. Our parsing logic relies on this. line = line[prefix_len:] if lstrip: line = line.lstrip() yield line_num, line return LineStream(lines=isolate_ktap_output(kernel_output)) KTAP_VERSIONS = [1] TAP_VERSIONS = [13, 14] def check_version(version_num: int, accepted_versions: List[int], version_type: str, test: Test) -> None: """ Adds error to test object if version number is too high or too low. Parameters: version_num - The inputted version number from the parsed KTAP or TAP header line accepted_version - List of accepted KTAP or TAP versions version_type - 'KTAP' or 'TAP' depending on the type of version line. test - Test object for current test being parsed """ if version_num < min(accepted_versions): test.add_error(f'{version_type} version lower than expected!') elif version_num > max(accepted_versions): test.add_error(f'{version_type} version higer than expected!') def parse_ktap_header(lines: LineStream, test: Test) -> bool: """ Parses KTAP/TAP header line and checks version number. Returns False if fails to parse KTAP/TAP header line. Accepted formats: - 'KTAP version [version number]' - 'TAP version [version number]' Parameters: lines - LineStream of KTAP output to parse test - Test object for current test being parsed Return: True if successfully parsed KTAP/TAP header line """ ktap_match = KTAP_START.match(lines.peek()) tap_match = TAP_START.match(lines.peek()) if ktap_match: version_num = int(ktap_match.group(1)) check_version(version_num, KTAP_VERSIONS, 'KTAP', test) elif tap_match: version_num = int(tap_match.group(1)) check_version(version_num, TAP_VERSIONS, 'TAP', test) else: return False test.log.append(lines.pop()) return True TEST_HEADER = re.compile(r'^# Subtest: (.*)$') def parse_test_header(lines: LineStream, test: Test) -> bool: """ Parses test header and stores test name in test object. Returns False if fails to parse test header line. 
Accepted format: - '# Subtest: [test name]' Parameters: lines - LineStream of KTAP output to parse test - Test object for current test being parsed Return: True if successfully parsed test header line """ match = TEST_HEADER.match(lines.peek()) if not match: return False test.log.append(lines.pop()) test.name = match.group(1) return True TEST_PLAN = re.compile(r'1\.\.([0-9]+)') def parse_test_plan(lines: LineStream, test: Test) -> bool: """ Parses test plan line and stores the expected number of subtests in test object. Reports an error if expected count is 0. Returns False and sets expected_count to None if there is no valid test plan. Accepted format: - '1..[number of subtests]' Parameters: lines - LineStream of KTAP output to parse test - Test object for current test being parsed Return: True if successfully parsed test plan line """ match = TEST_PLAN.match(lines.peek()) if not match: test.expected_count = None return False test.log.append(lines.pop()) expected_count = int(match.group(1)) test.expected_count = expected_count return True TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$') TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$') def peek_test_name_match(lines: LineStream, test: Test) -> bool: """ Matches current line with the format of a test result line and checks if the name matches the name of the current test. Returns False if fails to match format or name. Accepted format: - '[ok|not ok] [test number] [-] [test name] [optional skip directive]' Parameters: lines - LineStream of KTAP output to parse test - Test object for current test being parsed Return: True if matched a test result line and the name matching the expected test name """ line = lines.peek() match = TEST_RESULT.match(line) if not match: return False name = match.group(4) return name == test.name def parse_test_result(lines: LineStream, test: Test, expected_num: int) -> bool: """ Parses test result line and stores the status and name in the test object. Reports an error if the test number does not match expected test number. Returns False if fails to parse test result line. Note that the SKIP directive is the only direction that causes a change in status. Accepted format: - '[ok|not ok] [test number] [-] [test name] [optional skip directive]' Parameters: lines - LineStream of KTAP output to parse test - Test object for current test being parsed expected_num - expected test number for current test Return: True if successfully parsed a test result line. """ line = lines.peek() match = TEST_RESULT.match(line) skip_match = TEST_RESULT_SKIP.match(line) # Check if line matches test result line format if not match: return False test.log.append(lines.pop()) # Set name of test object if skip_match: test.name = skip_match.group(4) else: test.name = match.group(4) # Check test num num = int(match.group(2)) if num != expected_num: test.add_error(f'Expected test number {expected_num} but found {num}') # Set status of test object status = match.group(1) if skip_match: test.status = TestStatus.SKIPPED elif status == 'ok': test.status = TestStatus.SUCCESS else: test.status = TestStatus.FAILURE return True def parse_diagnostic(lines: LineStream) -> List[str]: """ Parse lines that do not match the format of a test result line or test header line and returns them in list. 
Line formats that are not parsed: - '# Subtest: [test name]' - '[ok|not ok] [test number] [-] [test name] [optional skip directive]' Parameters: lines - LineStream of KTAP output to parse Return: Log of diagnostic lines """ log = [] # type: List[str] while lines and not TEST_RESULT.match(lines.peek()) and not \ TEST_HEADER.match(lines.peek()): log.append(lines.pop()) return log # Printing helper methods: DIVIDER = '=' * 60 def format_test_divider(message: str, len_message: int) -> str: """ Returns string with message centered in fixed width divider. Example: '===================== message example =====================' Parameters: message - message to be centered in divider line len_message - length of the message to be printed such that any characters of the color codes are not counted Return: String containing message centered in fixed width divider """ default_count = 3 # default number of dashes len_1 = default_count len_2 = default_count difference = len(DIVIDER) - len_message - 2 # 2 spaces added if difference > 0: # calculate number of dashes for each side of the divider len_1 = int(difference / 2) len_2 = difference - len_1 return ('=' * len_1) + f' {message} ' + ('=' * len_2) def print_test_header(test: Test) -> None: """ Prints test header with test name and optionally the expected number of subtests. Example: '=================== example (2 subtests) ===================' Parameters: test - Test object representing current test being printed """ message = test.name if test.expected_count: if test.expected_count == 1: message += ' (1 subtest)' else: message += f' ({test.expected_count} subtests)' stdout.print_with_timestamp(format_test_divider(message, len(message))) def print_log(log: Iterable[str]) -> None: """Prints all strings in saved log for test in yellow.""" for m in log: stdout.print_with_timestamp(stdout.yellow(m)) def format_test_result(test: Test) -> str: """ Returns string with formatted test result with colored status and test name. Example: '[PASSED] example' Parameters: test - Test object representing current test being printed Return: String containing formatted test result """ if test.status == TestStatus.SUCCESS: return stdout.green('[PASSED] ') + test.name if test.status == TestStatus.SKIPPED: return stdout.yellow('[SKIPPED] ') + test.name if test.status == TestStatus.NO_TESTS: return stdout.yellow('[NO TESTS RUN] ') + test.name if test.status == TestStatus.TEST_CRASHED: print_log(test.log) return stdout.red('[CRASHED] ') + test.name print_log(test.log) return stdout.red('[FAILED] ') + test.name def print_test_result(test: Test) -> None: """ Prints result line with status of test. Example: '[PASSED] example' Parameters: test - Test object representing current test being printed """ stdout.print_with_timestamp(format_test_result(test)) def print_test_footer(test: Test) -> None: """ Prints test footer with status of test. Example: '===================== [PASSED] example =====================' Parameters: test - Test object representing current test being printed """ message = format_test_result(test) stdout.print_with_timestamp(format_test_divider(message, len(message) - stdout.color_len())) def print_summary_line(test: Test) -> None: """ Prints summary line of test object. Color of line is dependent on status of test. Color is green if test passes, yellow if test is skipped, and red if the test fails or crashes. Summary line contains counts of the statuses of the tests subtests or the test itself if it has no subtests. Example: "Testing complete. 
Passed: 2, Failed: 0, Crashed: 0, Skipped: 0, Errors: 0" test - Test object representing current test being printed """ if test.status == TestStatus.SUCCESS: color = stdout.green elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS): color = stdout.yellow else: color = stdout.red stdout.print_with_timestamp(color(f'Testing complete. {test.counts}')) # Other methods: def bubble_up_test_results(test: Test) -> None: """ If the test has subtests, add the test counts of the subtests to the test and check if any of the tests crashed and if so set the test status to crashed. Otherwise if the test has no subtests add the status of the test to the test counts. Parameters: test - Test object for current test being parsed """ subtests = test.subtests counts = test.counts status = test.status for t in subtests: counts.add_subtest_counts(t.counts) if counts.total() == 0: counts.add_status(status) elif test.counts.get_status() == TestStatus.TEST_CRASHED: test.status = TestStatus.TEST_CRASHED def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: """ Finds next test to parse in LineStream, creates new Test object, parses any subtests of the test, populates Test object with all information (status, name) about the test and the Test objects for any subtests, and then returns the Test object. The method accepts three formats of tests: Accepted test formats: - Main KTAP/TAP header Example: KTAP version 1 1..4 [subtests] - Subtest header line Example: # Subtest: name 1..3 [subtests] ok 1 name - Test result line Example: ok 1 - test Parameters: lines - LineStream of KTAP output to parse expected_num - expected test number for test to be parsed log - list of strings containing any preceding diagnostic lines corresponding to the current test Return: Test object populated with characteristics and any subtests """ test = Test() test.log.extend(log) parent_test = False main = parse_ktap_header(lines, test) if main: # If KTAP/TAP header is found, attempt to parse # test plan test.name = "main" parse_test_plan(lines, test) parent_test = True else: # If KTAP/TAP header is not found, test must be subtest # header or test result line so parse attempt to parser # subtest header parent_test = parse_test_header(lines, test) if parent_test: # If subtest header is found, attempt to parse # test plan and print header parse_test_plan(lines, test) print_test_header(test) expected_count = test.expected_count subtests = [] test_num = 1 while parent_test and (expected_count is None or test_num <= expected_count): # Loop to parse any subtests. # Break after parsing expected number of tests or # if expected number of tests is unknown break when test # result line with matching name to subtest header is found # or no more lines in stream. 
sub_log = parse_diagnostic(lines) sub_test = Test() if not lines or (peek_test_name_match(lines, test) and not main): if expected_count and test_num <= expected_count: # If parser reaches end of test before # parsing expected number of subtests, print # crashed subtest and record error test.add_error('missing expected subtest!') sub_test.log.extend(sub_log) test.counts.add_status( TestStatus.TEST_CRASHED) print_test_result(sub_test) else: test.log.extend(sub_log) break else: sub_test = parse_test(lines, test_num, sub_log) subtests.append(sub_test) test_num += 1 test.subtests = subtests if not main: # If not main test, look for test result line test.log.extend(parse_diagnostic(lines)) if (parent_test and peek_test_name_match(lines, test)) or \ not parent_test: parse_test_result(lines, test, expected_num) else: test.add_error('missing subtest result line!') # Check for there being no tests if parent_test and len(subtests) == 0: # Don't override a bad status if this test had one reported. # Assumption: no subtests means CRASHED is from Test.__init__() if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS): test.status = TestStatus.NO_TESTS test.add_error('0 tests run!') # Add statuses to TestCounts attribute in Test object bubble_up_test_results(test) if parent_test and not main: # If test has subtests and is not the main test object, print # footer. print_test_footer(test) elif not main: print_test_result(test) return test def parse_run_tests(kernel_output: Iterable[str]) -> Test: """ Using kernel output, extract KTAP lines, parse the lines for test results and print condensed test results and summary line. Parameters: kernel_output - Iterable object contains lines of kernel output Return: Test - the main test object with all subtests. """ stdout.print_with_timestamp(DIVIDER) lines = extract_tap_lines(kernel_output) test = Test() if not lines: test.name = '<missing>' test.add_error('could not find any KTAP output!') test.status = TestStatus.FAILURE_TO_PARSE_TESTS else: test = parse_test(lines, 0, []) if test.status != TestStatus.NO_TESTS: test.status = test.counts.get_status() stdout.print_with_timestamp(DIVIDER) print_summary_line(test) return test
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/kunit_parser.py
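parse_test() above documents three accepted input shapes: a top-level KTAP/TAP header, '# Subtest:' headers, and 'ok'/'not ok' result lines. The sketch below feeds a synthetic log built from those shapes through parse_run_tests(); the suite and case names are invented, and it assumes kunit_parser.py and kunit_printer.py are importable:

import kunit_parser

# A minimal well-formed log: one suite ('# Subtest:') containing two
# passing cases, closed by the suite's own result line.
log = [
    'TAP version 14',
    '1..1',
    '# Subtest: example_suite',
    '1..2',
    'ok 1 case_one',
    'ok 2 case_two',
    'ok 1 example_suite',
]
result = kunit_parser.parse_run_tests(log)
assert result.status == kunit_parser.TestStatus.SUCCESS
assert result.counts.passed == 2 and result.counts.errors == 0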
#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 # # A collection of tests for tools/testing/kunit/kunit.py # # Copyright (C) 2019, Google LLC. # Author: Brendan Higgins <[email protected]> import unittest from unittest import mock import tempfile, shutil # Handling test_tmpdir import itertools import json import os import signal import subprocess from typing import Iterable import kunit_config import kunit_parser import kunit_kernel import kunit_json import kunit test_tmpdir = '' abs_test_data_dir = '' def setUpModule(): global test_tmpdir, abs_test_data_dir test_tmpdir = tempfile.mkdtemp() abs_test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test_data')) def tearDownModule(): shutil.rmtree(test_tmpdir) def test_data_path(path): return os.path.join(abs_test_data_dir, path) class KconfigTest(unittest.TestCase): def test_is_subset_of(self): kconfig0 = kunit_config.Kconfig() self.assertTrue(kconfig0.is_subset_of(kconfig0)) kconfig1 = kunit_config.Kconfig() kconfig1.add_entry('TEST', 'y') self.assertTrue(kconfig1.is_subset_of(kconfig1)) self.assertTrue(kconfig0.is_subset_of(kconfig1)) self.assertFalse(kconfig1.is_subset_of(kconfig0)) def test_read_from_file(self): kconfig_path = test_data_path('test_read_from_file.kconfig') kconfig = kunit_config.parse_file(kconfig_path) expected_kconfig = kunit_config.Kconfig() expected_kconfig.add_entry('UML', 'y') expected_kconfig.add_entry('MMU', 'y') expected_kconfig.add_entry('TEST', 'y') expected_kconfig.add_entry('EXAMPLE_TEST', 'y') expected_kconfig.add_entry('MK8', 'n') self.assertEqual(kconfig, expected_kconfig) def test_write_to_file(self): kconfig_path = os.path.join(test_tmpdir, '.config') expected_kconfig = kunit_config.Kconfig() expected_kconfig.add_entry('UML', 'y') expected_kconfig.add_entry('MMU', 'y') expected_kconfig.add_entry('TEST', 'y') expected_kconfig.add_entry('EXAMPLE_TEST', 'y') expected_kconfig.add_entry('MK8', 'n') expected_kconfig.write_to_file(kconfig_path) actual_kconfig = kunit_config.parse_file(kconfig_path) self.assertEqual(actual_kconfig, expected_kconfig) class KUnitParserTest(unittest.TestCase): def assertContains(self, needle: str, haystack: kunit_parser.LineStream): # Clone the iterator so we can print the contents on failure. 
copy, backup = itertools.tee(haystack) for line in copy: if needle in line: return raise AssertionError(f'"{needle}" not found in {list(backup)}!') def test_output_isolated_correctly(self): log_path = test_data_path('test_output_isolated_correctly.log') with open(log_path) as file: result = kunit_parser.extract_tap_lines(file.readlines()) self.assertContains('TAP version 14', result) self.assertContains('# Subtest: example', result) self.assertContains('1..2', result) self.assertContains('ok 1 - example_simple_test', result) self.assertContains('ok 2 - example_mock_test', result) self.assertContains('ok 1 - example', result) def test_output_with_prefix_isolated_correctly(self): log_path = test_data_path('test_pound_sign.log') with open(log_path) as file: result = kunit_parser.extract_tap_lines(file.readlines()) self.assertContains('TAP version 14', result) self.assertContains('# Subtest: kunit-resource-test', result) self.assertContains('1..5', result) self.assertContains('ok 1 - kunit_resource_test_init_resources', result) self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result) self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result) self.assertContains('foo bar #', result) self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result) self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result) self.assertContains('ok 1 - kunit-resource-test', result) self.assertContains('foo bar # non-kunit output', result) self.assertContains('# Subtest: kunit-try-catch-test', result) self.assertContains('1..2', result) self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch', result) self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch', result) self.assertContains('ok 2 - kunit-try-catch-test', result) self.assertContains('# Subtest: string-stream-test', result) self.assertContains('1..3', result) self.assertContains('ok 1 - string_stream_test_empty_on_creation', result) self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result) self.assertContains('ok 3 - string_stream_test_get_string', result) self.assertContains('ok 3 - string-stream-test', result) def test_parse_successful_test_log(self): all_passed_log = test_data_path('test_is_test_passed-all_passed.log') with open(all_passed_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) def test_parse_successful_nested_tests_log(self): all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log') with open(all_passed_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) def test_kselftest_nested(self): kselftest_log = test_data_path('test_is_test_passed-kselftest.log') with open(kselftest_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) def test_parse_failed_test_log(self): failed_log = test_data_path('test_is_test_passed-failure.log') with open(failed_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.FAILURE, result.status) def test_no_header(self): empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log') with open(empty_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) self.assertEqual(0, len(result.subtests)) self.assertEqual( 
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status) def test_missing_test_plan(self): missing_plan_log = test_data_path('test_is_test_passed-' 'missing_plan.log') with open(missing_plan_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines( file.readlines())) # A missing test plan is not an error. self.assertEqual(0, result.counts.errors) # All tests should be accounted for. self.assertEqual(10, result.counts.total()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) def test_no_tests(self): header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log') with open(header_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) self.assertEqual(0, len(result.subtests)) self.assertEqual( kunit_parser.TestStatus.NO_TESTS, result.status) def test_no_tests_no_plan(self): no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log') with open(no_plan_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) self.assertEqual(0, len(result.subtests[0].subtests[0].subtests)) self.assertEqual( kunit_parser.TestStatus.NO_TESTS, result.subtests[0].subtests[0].status) self.assertEqual(1, result.counts.errors) def test_no_kunit_output(self): crash_log = test_data_path('test_insufficient_memory.log') print_mock = mock.patch('kunit_printer.Printer.print').start() with open(crash_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) print_mock.assert_any_call(StrContains('could not find any KTAP output!')) print_mock.stop() self.assertEqual(0, len(result.subtests)) def test_skipped_test(self): skipped_log = test_data_path('test_skip_tests.log') with open(skipped_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) # A skipped test does not fail the whole suite. self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) def test_skipped_all_tests(self): skipped_log = test_data_path('test_skip_all_tests.log') with open(skipped_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SKIPPED, result.status) def test_ignores_hyphen(self): hyphen_log = test_data_path('test_strip_hyphen.log') with open(hyphen_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) # A skipped test does not fail the whole suite. 
self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) self.assertEqual( "sysctl_test", result.subtests[0].name) self.assertEqual( "example", result.subtests[1].name) file.close() def test_ignores_prefix_printk_time(self): prefix_log = test_data_path('test_config_printk_time.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_ignores_multiple_prefixes(self): prefix_log = test_data_path('test_multiple_prefixes.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_prefix_mixed_kernel_output(self): mixed_prefix_log = test_data_path('test_interrupted_tap_output.log') with open(mixed_prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_prefix_poundsign(self): pound_log = test_data_path('test_pound_sign.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_kernel_panic_end(self): panic_log = test_data_path('test_kernel_panic_interrupt.log') with open(panic_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.TEST_CRASHED, result.status) self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_pound_no_prefix(self): pound_log = test_data_path('test_pound_no_prefix.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) self.assertEqual('kunit-resource-test', result.subtests[0].name) def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream: return kunit_parser.LineStream(enumerate(strs, start=1)) class LineStreamTest(unittest.TestCase): def test_basic(self): stream = line_stream_from_strs(['hello', 'world']) self.assertTrue(stream, msg='Should be more input') self.assertEqual(stream.line_number(), 1) self.assertEqual(stream.peek(), 'hello') self.assertEqual(stream.pop(), 'hello') self.assertTrue(stream, msg='Should be more input') self.assertEqual(stream.line_number(), 2) self.assertEqual(stream.peek(), 'world') self.assertEqual(stream.pop(), 'world') self.assertFalse(stream, msg='Should be no more input') with self.assertRaisesRegex(ValueError, 'LineStream: going past EOF'): stream.pop() def test_is_lazy(self): called_times = 0 def generator(): nonlocal called_times for _ in range(1,5): called_times += 1 yield called_times, str(called_times) stream = kunit_parser.LineStream(generator()) self.assertEqual(called_times, 0) self.assertEqual(stream.pop(), '1') self.assertEqual(called_times, 1) self.assertEqual(stream.pop(), '2') self.assertEqual(called_times, 2) class LinuxSourceTreeTest(unittest.TestCase): def setUp(self): mock.patch.object(signal, 'signal').start() self.addCleanup(mock.patch.stopall) def test_invalid_kunitconfig(self): with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'): kunit_kernel.LinuxSourceTree('', kunitconfig_paths=['/nonexistent_file']) def 
test_valid_kunitconfig(self): with tempfile.NamedTemporaryFile('wt') as kunitconfig: kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[kunitconfig.name]) def test_dir_kunitconfig(self): with tempfile.TemporaryDirectory('') as dir: with open(os.path.join(dir, '.kunitconfig'), 'w'): pass kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir]) def test_multiple_kunitconfig(self): want_kconfig = kunit_config.Kconfig() want_kconfig.add_entry('KUNIT', 'y') want_kconfig.add_entry('KUNIT_TEST', 'm') with tempfile.TemporaryDirectory('') as dir: other = os.path.join(dir, 'otherkunitconfig') with open(os.path.join(dir, '.kunitconfig'), 'w') as f: f.write('CONFIG_KUNIT=y') with open(other, 'w') as f: f.write('CONFIG_KUNIT_TEST=m') pass tree = kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other]) self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig) def test_multiple_kunitconfig_invalid(self): with tempfile.TemporaryDirectory('') as dir: other = os.path.join(dir, 'otherkunitconfig') with open(os.path.join(dir, '.kunitconfig'), 'w') as f: f.write('CONFIG_KUNIT=y') with open(other, 'w') as f: f.write('CONFIG_KUNIT=m') with self.assertRaisesRegex(kunit_kernel.ConfigError, '(?s)Multiple values.*CONFIG_KUNIT'): kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other]) def test_kconfig_add(self): want_kconfig = kunit_config.Kconfig() want_kconfig.add_entry('NOT_REAL', 'y') tree = kunit_kernel.LinuxSourceTree('', kconfig_add=['CONFIG_NOT_REAL=y']) self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig) def test_invalid_arch(self): with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'): kunit_kernel.LinuxSourceTree('', arch='invalid') def test_run_kernel_hits_exception(self): def fake_start(unused_args, unused_build_dir): return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE) with tempfile.TemporaryDirectory('') as build_dir: tree = kunit_kernel.LinuxSourceTree(build_dir) mock.patch.object(tree._ops, 'start', side_effect=fake_start).start() with self.assertRaises(ValueError): for line in tree.run_kernel(build_dir=build_dir): self.assertEqual(line, 'hi\n') raise ValueError('uh oh, did not read all output') with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile: self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output') def test_build_reconfig_no_config(self): with tempfile.TemporaryDirectory('') as build_dir: with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: f.write('CONFIG_KUNIT=y') tree = kunit_kernel.LinuxSourceTree(build_dir) # Stub out the source tree operations, so we don't have # the defaults for any given architecture get in the # way. 
tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None) mock_build_config = mock.patch.object(tree, 'build_config').start() # Should generate the .config self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) mock_build_config.assert_called_once_with(build_dir, []) def test_build_reconfig_existing_config(self): with tempfile.TemporaryDirectory('') as build_dir: # Existing .config is a superset, should not touch it with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: f.write('CONFIG_KUNIT=y') with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f: f.write('CONFIG_KUNIT=y') with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f: f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') tree = kunit_kernel.LinuxSourceTree(build_dir) # Stub out the source tree operations, so we don't have # the defaults for any given architecture get in the # way. tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None) mock_build_config = mock.patch.object(tree, 'build_config').start() self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) self.assertEqual(mock_build_config.call_count, 0) def test_build_reconfig_remove_option(self): with tempfile.TemporaryDirectory('') as build_dir: # We removed CONFIG_KUNIT_TEST=y from our .kunitconfig... with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: f.write('CONFIG_KUNIT=y') with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f: f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f: f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') tree = kunit_kernel.LinuxSourceTree(build_dir) # Stub out the source tree operations, so we don't have # the defaults for any given architecture get in the # way. tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None) mock_build_config = mock.patch.object(tree, 'build_config').start() # ... so we should trigger a call to build_config() self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) mock_build_config.assert_called_once_with(build_dir, []) # TODO: add more test cases. 
class KUnitJsonTest(unittest.TestCase): def _json_for(self, log_file): with open(test_data_path(log_file)) as file: test_result = kunit_parser.parse_run_tests(file) json_obj = kunit_json.get_json_result( test=test_result, metadata=kunit_json.Metadata()) return json.loads(json_obj) def test_failed_test_json(self): result = self._json_for('test_is_test_passed-failure.log') self.assertEqual( {'name': 'example_simple_test', 'status': 'FAIL'}, result["sub_groups"][1]["test_cases"][0]) def test_crashed_test_json(self): result = self._json_for('test_kernel_panic_interrupt.log') self.assertEqual( {'name': '', 'status': 'ERROR'}, result["sub_groups"][2]["test_cases"][1]) def test_skipped_test_json(self): result = self._json_for('test_skip_tests.log') self.assertEqual( {'name': 'example_skip_test', 'status': 'SKIP'}, result["sub_groups"][1]["test_cases"][1]) def test_no_tests_json(self): result = self._json_for('test_is_test_passed-no_tests_run_with_header.log') self.assertEqual(0, len(result['sub_groups'])) def test_nested_json(self): result = self._json_for('test_is_test_passed-all_passed_nested.log') self.assertEqual( {'name': 'example_simple_test', 'status': 'PASS'}, result["sub_groups"][0]["sub_groups"][0]["test_cases"][0]) class StrContains(str): def __eq__(self, other): return self in other class KUnitMainTest(unittest.TestCase): def setUp(self): path = test_data_path('test_is_test_passed-all_passed.log') with open(path) as file: all_passed_log = file.readlines() self.print_mock = mock.patch('kunit_printer.Printer.print').start() self.addCleanup(mock.patch.stopall) self.mock_linux_init = mock.patch.object(kunit_kernel, 'LinuxSourceTree').start() self.linux_source_mock = self.mock_linux_init.return_value self.linux_source_mock.build_reconfig.return_value = True self.linux_source_mock.build_kernel.return_value = True self.linux_source_mock.run_kernel.return_value = all_passed_log def test_config_passes_args_pass(self): kunit.main(['config', '--build_dir=.kunit']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) def test_build_passes_args_pass(self): kunit.main(['build']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.linux_source_mock.build_kernel.assert_called_once_with(kunit.get_default_jobs(), '.kunit', None) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) def test_exec_passes_args_pass(self): kunit.main(['exec']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir='.kunit', filter_glob='', timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_passes_args_pass(self): kunit.main(['run']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir='.kunit', filter_glob='', timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_exec_passes_args_fail(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) with self.assertRaises(SystemExit) as e: kunit.main(['exec']) self.assertEqual(e.exception.code, 1) def test_run_passes_args_fail(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) with self.assertRaises(SystemExit) as e: kunit.main(['run']) 
self.assertEqual(e.exception.code, 1) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) self.print_mock.assert_any_call(StrContains('could not find any KTAP output!')) def test_exec_no_tests(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0']) with self.assertRaises(SystemExit) as e: kunit.main(['run']) self.assertEqual(e.exception.code, 1) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir='.kunit', filter_glob='', timeout=300) self.print_mock.assert_any_call(StrContains(' 0 tests run!')) def test_exec_raw_output(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) kunit.main(['exec', '--raw_output']) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) for call in self.print_mock.call_args_list: self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) def test_run_raw_output(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) kunit.main(['run', '--raw_output']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) for call in self.print_mock.call_args_list: self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) def test_run_raw_output_kunit(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) kunit.main(['run', '--raw_output=kunit']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) for call in self.print_mock.call_args_list: self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) self.assertNotEqual(call, mock.call(StrContains(' 0 tests run'))) def test_run_raw_output_invalid(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) with self.assertRaises(SystemExit) as e: kunit.main(['run', '--raw_output=invalid']) self.assertNotEqual(e.exception.code, 0) def test_run_raw_output_does_not_take_positional_args(self): # --raw_output is a string flag, but we don't want it to consume # any positional arguments, only ones after an '=' self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) kunit.main(['run', '--raw_output', 'filter_glob']) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300) def test_exec_timeout(self): timeout = 3453 kunit.main(['exec', '--timeout', str(timeout)]) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir='.kunit', filter_glob='', timeout=timeout) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_timeout(self): timeout = 3453 kunit.main(['run', '--timeout', str(timeout)]) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir='.kunit', filter_glob='', timeout=timeout) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_builddir(self): build_dir = '.kunit' kunit.main(['run', '--build_dir=.kunit']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir=build_dir, filter_glob='', timeout=300) self.print_mock.assert_any_call(StrContains('Testing 
complete.')) def test_config_builddir(self): build_dir = '.kunit' kunit.main(['config', '--build_dir', build_dir]) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) def test_build_builddir(self): build_dir = '.kunit' jobs = kunit.get_default_jobs() kunit.main(['build', '--build_dir', build_dir]) self.linux_source_mock.build_kernel.assert_called_once_with(jobs, build_dir, None) def test_exec_builddir(self): build_dir = '.kunit' kunit.main(['exec', '--build_dir', build_dir]) self.linux_source_mock.run_kernel.assert_called_once_with( args=None, build_dir=build_dir, filter_glob='', timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_kunitconfig(self): kunit.main(['run', '--kunitconfig=mykunitconfig']) # Just verify that we parsed and initialized it correctly here. self.mock_linux_init.assert_called_once_with('.kunit', kunitconfig_paths=['mykunitconfig'], kconfig_add=None, arch='um', cross_compile=None, qemu_config_path=None, extra_qemu_args=[]) def test_config_kunitconfig(self): kunit.main(['config', '--kunitconfig=mykunitconfig']) # Just verify that we parsed and initialized it correctly here. self.mock_linux_init.assert_called_once_with('.kunit', kunitconfig_paths=['mykunitconfig'], kconfig_add=None, arch='um', cross_compile=None, qemu_config_path=None, extra_qemu_args=[]) def test_config_alltests(self): kunit.main(['config', '--kunitconfig=mykunitconfig', '--alltests']) # Just verify that we parsed and initialized it correctly here. self.mock_linux_init.assert_called_once_with('.kunit', kunitconfig_paths=[kunit_kernel.ALL_TESTS_CONFIG_PATH, 'mykunitconfig'], kconfig_add=None, arch='um', cross_compile=None, qemu_config_path=None, extra_qemu_args=[]) @mock.patch.object(kunit_kernel, 'LinuxSourceTree') def test_run_multiple_kunitconfig(self, mock_linux_init): mock_linux_init.return_value = self.linux_source_mock kunit.main(['run', '--kunitconfig=mykunitconfig', '--kunitconfig=other']) # Just verify that we parsed and initialized it correctly here. mock_linux_init.assert_called_once_with('.kunit', kunitconfig_paths=['mykunitconfig', 'other'], kconfig_add=None, arch='um', cross_compile=None, qemu_config_path=None, extra_qemu_args=[]) def test_run_kconfig_add(self): kunit.main(['run', '--kconfig_add=CONFIG_KASAN=y', '--kconfig_add=CONFIG_KCSAN=y']) # Just verify that we parsed and initialized it correctly here. self.mock_linux_init.assert_called_once_with('.kunit', kunitconfig_paths=[], kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'], arch='um', cross_compile=None, qemu_config_path=None, extra_qemu_args=[]) def test_run_qemu_args(self): kunit.main(['run', '--arch=x86_64', '--qemu_args', '-m 2048']) # Just verify that we parsed and initialized it correctly here. 
self.mock_linux_init.assert_called_once_with('.kunit', kunitconfig_paths=[], kconfig_add=None, arch='x86_64', cross_compile=None, qemu_config_path=None, extra_qemu_args=['-m', '2048']) def test_run_kernel_args(self): kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2']) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( args=['a=1','b=2'], build_dir='.kunit', filter_glob='', timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_list_tests(self): want = ['suite.test1', 'suite.test2', 'suite2.test1'] self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want got = kunit._list_tests(self.linux_source_mock, kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'suite')) self.assertEqual(got, want) # Should respect the user's filter glob when listing tests. self.linux_source_mock.run_kernel.assert_called_once_with( args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', timeout=300) @mock.patch.object(kunit, '_list_tests') def test_run_isolated_by_suite(self, mock_tests): mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1'] kunit.main(['exec', '--run_isolated=suite', 'suite*.test*']) # Should respect the user's filter glob when listing tests. mock_tests.assert_called_once_with(mock.ANY, kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', None, 'suite')) self.linux_source_mock.run_kernel.assert_has_calls([ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300), mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300), ]) @mock.patch.object(kunit, '_list_tests') def test_run_isolated_by_test(self, mock_tests): mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1'] kunit.main(['exec', '--run_isolated=test', 'suite*']) # Should respect the user's filter glob when listing tests. mock_tests.assert_called_once_with(mock.ANY, kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'test')) self.linux_source_mock.run_kernel.assert_has_calls([ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300), mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300), mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', timeout=300), ]) if __name__ == '__main__': unittest.main()
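# --- Editor's note: a minimal, self-contained sketch (not part of the
# upstream file), kept as comments because unittest.main() above never
# returns. It shows why the StrContains(str) helper used by these tests
# works with mock: mock compares expected vs. recorded call arguments
# with ==, so overriding __eq__ on a str subclass quietly turns exact
# matching into substring matching.
#
# from unittest import mock
#
# class StrContains(str):
#     def __eq__(self, other):
#         return self in other
#
# printer = mock.Mock()
# printer.print('Testing complete. 3 tests run.')
# printer.print.assert_any_call(StrContains('3 tests run'))  # passes
# printer.print.assert_any_call('3 tests run')               # would raise: exact match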
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/kunit_tool_test.py
# SPDX-License-Identifier: GPL-2.0 # # Generates JSON from KUnit results according to # KernelCI spec: https://github.com/kernelci/kernelci-doc/wiki/Test-API # # Copyright (C) 2020, Google LLC. # Author: Heidi Fahim <[email protected]> from dataclasses import dataclass import json from typing import Any, Dict from kunit_parser import Test, TestStatus @dataclass class Metadata: """Stores metadata about this run to include in get_json_result().""" arch: str = '' def_config: str = '' build_dir: str = '' JsonObj = Dict[str, Any] _status_map: Dict[TestStatus, str] = { TestStatus.SUCCESS: "PASS", TestStatus.SKIPPED: "SKIP", TestStatus.TEST_CRASHED: "ERROR", } def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj: sub_groups = [] # List[JsonObj] test_cases = [] # List[JsonObj] for subtest in test.subtests: if subtest.subtests: sub_group = _get_group_json(subtest, common_fields) sub_groups.append(sub_group) continue status = _status_map.get(subtest.status, "FAIL") test_cases.append({"name": subtest.name, "status": status}) test_group = { "name": test.name, "sub_groups": sub_groups, "test_cases": test_cases, } test_group.update(common_fields) return test_group def get_json_result(test: Test, metadata: Metadata) -> str: common_fields = { "arch": metadata.arch, "defconfig": metadata.def_config, "build_environment": metadata.build_dir, "lab_name": None, "kernel": None, "job": None, "git_branch": "kselftest", } test_group = _get_group_json(test, common_fields) test_group["name"] = "KUnit Test Group" return json.dumps(test_group, indent=4)
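# --- Editor's note: a self-contained sketch (not part of the upstream file)
# of the sub_groups/test_cases JSON shape that _get_group_json() builds
# above. It uses a made-up stand-in for kunit_parser.Test so it can run on
# its own; the suite/test names are illustrative only.
if __name__ == '__main__':
	from dataclasses import field

	@dataclass
	class _FakeTest:  # stands in for kunit_parser.Test
		name: str = ''
		status: str = 'SUCCESS'  # stands in for kunit_parser.TestStatus
		subtests: list = field(default_factory=list)

	def _group(test) -> JsonObj:
		subs, cases = [], []
		for sub in test.subtests:
			if sub.subtests:
				subs.append(_group(sub))
			else:
				cases.append({'name': sub.name,
					'status': 'PASS' if sub.status == 'SUCCESS' else 'FAIL'})
		return {'name': test.name, 'sub_groups': subs, 'test_cases': cases}

	_suite = _FakeTest('suite', subtests=[_FakeTest('test1'), _FakeTest('test2', 'FAILURE')])
	print(json.dumps(_group(_FakeTest('main', subtests=[_suite])), indent=4))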
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/kunit_json.py
#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 # # This file runs some basic checks to verify kunit works. # It is only of interest if you're making changes to KUnit itself. # # Copyright (C) 2021, Google LLC. # Author: Daniel Latypov <[email protected]> from concurrent import futures import datetime import os import shutil import subprocess import sys import textwrap from typing import Dict, List, Sequence ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) TIMEOUT = datetime.timedelta(minutes=5).total_seconds() commands: Dict[str, Sequence[str]] = { 'kunit_tool_test.py': ['./kunit_tool_test.py'], 'kunit smoke test': ['./kunit.py', 'run', '--kunitconfig=lib/kunit', '--build_dir=kunit_run_checks'], 'pytype': ['/bin/sh', '-c', 'pytype *.py'], 'mypy': ['/bin/sh', '-c', 'mypy *.py'], } # The user might not have mypy or pytype installed, skip them if so. # Note: you can install both via `$ pip install mypy pytype` necessary_deps : Dict[str, str] = { 'pytype': 'pytype', 'mypy': 'mypy', } def main(argv: Sequence[str]) -> None: if argv: raise RuntimeError('This script takes no arguments') future_to_name: Dict[futures.Future, str] = {} executor = futures.ThreadPoolExecutor(max_workers=len(commands)) for name, argv in commands.items(): if name in necessary_deps and shutil.which(necessary_deps[name]) is None: print(f'{name}: SKIPPED, {necessary_deps[name]} not in $PATH') continue f = executor.submit(run_cmd, argv) future_to_name[f] = name has_failures = False print(f'Waiting on {len(future_to_name)} checks ({", ".join(future_to_name.values())})...') for f in futures.as_completed(future_to_name.keys()): name = future_to_name[f] ex = f.exception() if not ex: print(f'{name}: PASSED') continue has_failures = True if isinstance(ex, subprocess.TimeoutExpired): print(f'{name}: TIMED OUT') elif isinstance(ex, subprocess.CalledProcessError): print(f'{name}: FAILED') else: print(f'{name}: unexpected exception: {ex}') continue output = ex.output if output: print(textwrap.indent(output.decode(), '> ')) executor.shutdown() if has_failures: sys.exit(1) def run_cmd(argv: Sequence[str]): subprocess.check_output(argv, stderr=subprocess.STDOUT, cwd=ABS_TOOL_PATH, timeout=TIMEOUT) if __name__ == '__main__': main(sys.argv[1:])
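# --- Editor's note: a minimal sketch (not part of the upstream file) of the
# concurrency pattern main() uses above, kept as comments so it cannot
# disturb the script's exit path: submit named jobs, then harvest each
# future's exception via as_completed() so one slow or failing check never
# hides the results of the others.
#
# from concurrent import futures
#
# def check(n):
#     if n % 2:
#         raise ValueError(f'check {n} failed')
#
# with futures.ThreadPoolExecutor(max_workers=4) as ex:
#     future_to_name = {ex.submit(check, n): f'check-{n}' for n in range(4)}
#     for f in futures.as_completed(future_to_name):
#         print(future_to_name[f], 'FAILED' if f.exception() else 'PASSED')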
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/run_checks.py
# SPDX-License-Identifier: GPL-2.0 # # Runs UML kernel, collects output, and handles errors. # # Copyright (C) 2019, Google LLC. # Author: Felix Guo <[email protected]> # Author: Brendan Higgins <[email protected]> import importlib.abc import importlib.util import logging import subprocess import os import shlex import shutil import signal import threading from typing import Iterator, List, Optional, Tuple import kunit_config from kunit_printer import stdout import qemu_config KCONFIG_PATH = '.config' KUNITCONFIG_PATH = '.kunitconfig' OLD_KUNITCONFIG_PATH = 'last_used_kunitconfig' DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config' ALL_TESTS_CONFIG_PATH = 'tools/testing/kunit/configs/all_tests.config' UML_KCONFIG_PATH = 'tools/testing/kunit/configs/arch_uml.config' OUTFILE_PATH = 'test.log' ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs') class ConfigError(Exception): """Represents an error trying to configure the Linux kernel.""" class BuildError(Exception): """Represents an error trying to build the Linux kernel.""" class LinuxSourceTreeOperations: """An abstraction over command line operations performed on a source tree.""" def __init__(self, linux_arch: str, cross_compile: Optional[str]): self._linux_arch = linux_arch self._cross_compile = cross_compile def make_mrproper(self) -> None: try: subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT) except OSError as e: raise ConfigError('Could not call make command: ' + str(e)) except subprocess.CalledProcessError as e: raise ConfigError(e.output.decode()) def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: return base_kunitconfig def make_olddefconfig(self, build_dir: str, make_options) -> None: command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, 'olddefconfig'] if self._cross_compile: command += ['CROSS_COMPILE=' + self._cross_compile] if make_options: command.extend(make_options) print('Populating config with:\n$', ' '.join(command)) try: subprocess.check_output(command, stderr=subprocess.STDOUT) except OSError as e: raise ConfigError('Could not call make command: ' + str(e)) except subprocess.CalledProcessError as e: raise ConfigError(e.output.decode()) def make(self, jobs, build_dir: str, make_options) -> None: command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)] if make_options: command.extend(make_options) if self._cross_compile: command += ['CROSS_COMPILE=' + self._cross_compile] print('Building with:\n$', ' '.join(command)) try: proc = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL) except OSError as e: raise BuildError('Could not call execute make: ' + str(e)) except subprocess.CalledProcessError as e: raise BuildError(e.output) _, stderr = proc.communicate() if proc.returncode != 0: raise BuildError(stderr.decode()) if stderr: # likely only due to build warnings print(stderr.decode()) def start(self, params: List[str], build_dir: str) -> subprocess.Popen: raise RuntimeError('not implemented!') class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations): def __init__(self, qemu_arch_params: qemu_config.QemuArchParams, cross_compile: Optional[str]): super().__init__(linux_arch=qemu_arch_params.linux_arch, cross_compile=cross_compile) self._kconfig = qemu_arch_params.kconfig self._qemu_arch = qemu_arch_params.qemu_arch self._kernel_path = qemu_arch_params.kernel_path self._kernel_command_line = 
qemu_arch_params.kernel_command_line + ' kunit_shutdown=reboot' self._extra_qemu_params = qemu_arch_params.extra_qemu_params def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: kconfig = kunit_config.parse_from_string(self._kconfig) kconfig.merge_in_entries(base_kunitconfig) return kconfig def start(self, params: List[str], build_dir: str) -> subprocess.Popen: kernel_path = os.path.join(build_dir, self._kernel_path) qemu_command = ['qemu-system-' + self._qemu_arch, '-nodefaults', '-m', '1024', '-kernel', kernel_path, '-append', ' '.join(params + [self._kernel_command_line]), '-no-reboot', '-nographic', '-serial', 'stdio'] + self._extra_qemu_params # Note: shlex.join() does what we want, but requires python 3.8+. print('Running tests with:\n$', ' '.join(shlex.quote(arg) for arg in qemu_command)) return subprocess.Popen(qemu_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, errors='backslashreplace') class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations): """An abstraction over command line operations performed on a source tree.""" def __init__(self, cross_compile=None): super().__init__(linux_arch='um', cross_compile=cross_compile) def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: kconfig = kunit_config.parse_file(UML_KCONFIG_PATH) kconfig.merge_in_entries(base_kunitconfig) return kconfig def start(self, params: List[str], build_dir: str) -> subprocess.Popen: """Runs the Linux UML binary. Must be named 'linux'.""" linux_bin = os.path.join(build_dir, 'linux') params.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt']) return subprocess.Popen([linux_bin] + params, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, errors='backslashreplace') def get_kconfig_path(build_dir: str) -> str: return os.path.join(build_dir, KCONFIG_PATH) def get_kunitconfig_path(build_dir: str) -> str: return os.path.join(build_dir, KUNITCONFIG_PATH) def get_old_kunitconfig_path(build_dir: str) -> str: return os.path.join(build_dir, OLD_KUNITCONFIG_PATH) def get_parsed_kunitconfig(build_dir: str, kunitconfig_paths: Optional[List[str]]=None) -> kunit_config.Kconfig: if not kunitconfig_paths: path = get_kunitconfig_path(build_dir) if not os.path.exists(path): shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, path) return kunit_config.parse_file(path) merged = kunit_config.Kconfig() for path in kunitconfig_paths: if os.path.isdir(path): path = os.path.join(path, KUNITCONFIG_PATH) if not os.path.exists(path): raise ConfigError(f'Specified kunitconfig ({path}) does not exist') partial = kunit_config.parse_file(path) diff = merged.conflicting_options(partial) if diff: diff_str = '\n\n'.join(f'{a}\n vs from {path}\n{b}' for a, b in diff) raise ConfigError(f'Multiple values specified for {len(diff)} options in kunitconfig:\n{diff_str}') merged.merge_in_entries(partial) return merged def get_outfile_path(build_dir: str) -> str: return os.path.join(build_dir, OUTFILE_PATH) def _default_qemu_config_path(arch: str) -> str: config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py') if os.path.isfile(config_path): return config_path options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')] raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options))) def _get_qemu_ops(config_path: str, extra_qemu_args: Optional[List[str]], cross_compile: Optional[str]) -> Tuple[str, LinuxSourceTreeOperations]: # The module name/path has very 
little to do with where the actual file # exists (I learned this through experimentation and could not find it # anywhere in the Python documentation). # # Bascially, we completely ignore the actual file location of the config # we are loading and just tell Python that the module lives in the # QEMU_CONFIGS_DIR for import purposes regardless of where it actually # exists as a file. module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path)) spec = importlib.util.spec_from_file_location(module_path, config_path) assert spec is not None config = importlib.util.module_from_spec(spec) # See https://github.com/python/typeshed/pull/2626 for context. assert isinstance(spec.loader, importlib.abc.Loader) spec.loader.exec_module(config) if not hasattr(config, 'QEMU_ARCH'): raise ValueError('qemu_config module missing "QEMU_ARCH": ' + config_path) params: qemu_config.QemuArchParams = config.QEMU_ARCH # type: ignore if extra_qemu_args: params.extra_qemu_params.extend(extra_qemu_args) return params.linux_arch, LinuxSourceTreeOperationsQemu( params, cross_compile=cross_compile) class LinuxSourceTree: """Represents a Linux kernel source tree with KUnit tests.""" def __init__( self, build_dir: str, kunitconfig_paths: Optional[List[str]]=None, kconfig_add: Optional[List[str]]=None, arch=None, cross_compile=None, qemu_config_path=None, extra_qemu_args=None) -> None: signal.signal(signal.SIGINT, self.signal_handler) if qemu_config_path: self._arch, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile) else: self._arch = 'um' if arch is None else arch if self._arch == 'um': self._ops = LinuxSourceTreeOperationsUml(cross_compile=cross_compile) else: qemu_config_path = _default_qemu_config_path(self._arch) _, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile) self._kconfig = get_parsed_kunitconfig(build_dir, kunitconfig_paths) if kconfig_add: kconfig = kunit_config.parse_from_string('\n'.join(kconfig_add)) self._kconfig.merge_in_entries(kconfig) def arch(self) -> str: return self._arch def clean(self) -> bool: try: self._ops.make_mrproper() except ConfigError as e: logging.error(e) return False return True def validate_config(self, build_dir: str) -> bool: kconfig_path = get_kconfig_path(build_dir) validated_kconfig = kunit_config.parse_file(kconfig_path) if self._kconfig.is_subset_of(validated_kconfig): return True missing = set(self._kconfig.as_entries()) - set(validated_kconfig.as_entries()) message = 'Not all Kconfig options selected in kunitconfig were in the generated .config.\n' \ 'This is probably due to unsatisfied dependencies.\n' \ 'Missing: ' + ', '.join(str(e) for e in missing) if self._arch == 'um': message += '\nNote: many Kconfig options aren\'t available on UML. You can try running ' \ 'on a different architecture with something like "--arch=x86_64".' 
logging.error(message) return False def build_config(self, build_dir: str, make_options) -> bool: kconfig_path = get_kconfig_path(build_dir) if build_dir and not os.path.exists(build_dir): os.mkdir(build_dir) try: self._kconfig = self._ops.make_arch_config(self._kconfig) self._kconfig.write_to_file(kconfig_path) self._ops.make_olddefconfig(build_dir, make_options) except ConfigError as e: logging.error(e) return False if not self.validate_config(build_dir): return False old_path = get_old_kunitconfig_path(build_dir) if os.path.exists(old_path): os.remove(old_path) # write_to_file appends to the file self._kconfig.write_to_file(old_path) return True def _kunitconfig_changed(self, build_dir: str) -> bool: old_path = get_old_kunitconfig_path(build_dir) if not os.path.exists(old_path): return True old_kconfig = kunit_config.parse_file(old_path) return old_kconfig != self._kconfig def build_reconfig(self, build_dir: str, make_options) -> bool: """Creates a new .config if it is not a subset of the .kunitconfig.""" kconfig_path = get_kconfig_path(build_dir) if not os.path.exists(kconfig_path): print('Generating .config ...') return self.build_config(build_dir, make_options) existing_kconfig = kunit_config.parse_file(kconfig_path) self._kconfig = self._ops.make_arch_config(self._kconfig) if self._kconfig.is_subset_of(existing_kconfig) and not self._kunitconfig_changed(build_dir): return True print('Regenerating .config ...') os.remove(kconfig_path) return self.build_config(build_dir, make_options) def build_kernel(self, jobs, build_dir: str, make_options) -> bool: try: self._ops.make_olddefconfig(build_dir, make_options) self._ops.make(jobs, build_dir, make_options) except (ConfigError, BuildError) as e: logging.error(e) return False return self.validate_config(build_dir) def run_kernel(self, args=None, build_dir='', filter_glob='', timeout=None) -> Iterator[str]: if not args: args = [] if filter_glob: args.append('kunit.filter_glob='+filter_glob) args.append('kunit.enable=1') process = self._ops.start(args, build_dir) assert process.stdout is not None # tell mypy it's set # Enforce the timeout in a background thread. def _wait_proc(): try: process.wait(timeout=timeout) except Exception as e: print(e) process.terminate() process.wait() waiter = threading.Thread(target=_wait_proc) waiter.start() output = open(get_outfile_path(build_dir), 'w') try: # Tee the output to the file and to our caller in real time. for line in process.stdout: output.write(line) yield line # This runs even if our caller doesn't consume every line. finally: # Flush any leftover output to the file output.write(process.stdout.read()) output.close() process.stdout.close() waiter.join() subprocess.call(['stty', 'sane']) def signal_handler(self, unused_sig, unused_frame) -> None: logging.error('Build interruption occurred. Cleaning console.') subprocess.call(['stty', 'sane'])
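# --- Editor's note: a self-contained sketch (not part of the upstream file)
# of the pattern run_kernel() uses above: stream a subprocess's output line
# by line while a watchdog thread enforces the timeout by terminating the
# process. The echo command is only there so the demo can run anywhere.
def _demo_stream_with_timeout(argv, timeout):
	proc = subprocess.Popen(argv, stdout=subprocess.PIPE, text=True)

	def _watchdog():
		try:
			proc.wait(timeout=timeout)
		except subprocess.TimeoutExpired:
			proc.terminate()
			proc.wait()

	waiter = threading.Thread(target=_watchdog)
	waiter.start()
	try:
		for line in proc.stdout:
			yield line  # the caller sees output in real time
	finally:
		proc.stdout.close()
		waiter.join()

if __name__ == '__main__':
	for _line in _demo_stream_with_timeout(['echo', 'hello'], timeout=5):
		print(_line, end='')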
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/kunit_kernel.py
# SPDX-License-Identifier: GPL-2.0 # # Builds a .config from a kunitconfig. # # Copyright (C) 2019, Google LLC. # Author: Felix Guo <[email protected]> # Author: Brendan Higgins <[email protected]> from dataclasses import dataclass import re from typing import Dict, Iterable, List, Set, Tuple CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$' CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$' @dataclass(frozen=True) class KconfigEntry: name: str value: str def __str__(self) -> str: if self.value == 'n': return f'# CONFIG_{self.name} is not set' return f'CONFIG_{self.name}={self.value}' class KconfigParseError(Exception): """Error parsing Kconfig defconfig or .config.""" class Kconfig: """Represents defconfig or .config specified using the Kconfig language.""" def __init__(self) -> None: self._entries = {} # type: Dict[str, str] def __eq__(self, other) -> bool: if not isinstance(other, self.__class__): return False return self._entries == other._entries def __repr__(self) -> str: return ','.join(str(e) for e in self.as_entries()) def as_entries(self) -> Iterable[KconfigEntry]: for name, value in self._entries.items(): yield KconfigEntry(name, value) def add_entry(self, name: str, value: str) -> None: self._entries[name] = value def is_subset_of(self, other: 'Kconfig') -> bool: for name, value in self._entries.items(): b = other._entries.get(name) if b is None: if value == 'n': continue return False if value != b: return False return True def conflicting_options(self, other: 'Kconfig') -> List[Tuple[KconfigEntry, KconfigEntry]]: diff = [] # type: List[Tuple[KconfigEntry, KconfigEntry]] for name, value in self._entries.items(): b = other._entries.get(name) if b and value != b: pair = (KconfigEntry(name, value), KconfigEntry(name, b)) diff.append(pair) return diff def merge_in_entries(self, other: 'Kconfig') -> None: for name, value in other._entries.items(): self._entries[name] = value def write_to_file(self, path: str) -> None: with open(path, 'a+') as f: for e in self.as_entries(): f.write(str(e) + '\n') def parse_file(path: str) -> Kconfig: with open(path, 'r') as f: return parse_from_string(f.read()) def parse_from_string(blob: str) -> Kconfig: """Parses a string containing Kconfig entries.""" kconfig = Kconfig() is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN) config_matcher = re.compile(CONFIG_PATTERN) for line in blob.split('\n'): line = line.strip() if not line: continue match = config_matcher.match(line) if match: kconfig.add_entry(match.group(1), match.group(2)) continue empty_match = is_not_set_matcher.match(line) if empty_match: kconfig.add_entry(empty_match.group(1), 'n') continue if line[0] == '#': continue raise KconfigParseError('Failed to parse: ' + line) return kconfig
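# --- Editor's note: a small usage sketch (not part of the upstream file)
# exercising the subset semantics implemented above: an entry whose value is
# 'n' is also satisfied when the other config omits the option entirely,
# mirroring how "# CONFIG_FOO is not set" behaves in a generated .config.
if __name__ == '__main__':
	_want = Kconfig()
	_want.add_entry('KUNIT', 'y')
	_want.add_entry('MK8', 'n')

	_have = parse_from_string('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
	print(_want.is_subset_of(_have))  # True: CONFIG_MK8 is absent, which satisfies 'n'
	print(_have.is_subset_of(_want))  # False: _want never sets CONFIG_KUNIT_TEST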
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/kunit_config.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='x86_64', kconfig=''' CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y''', qemu_arch='x86_64', kernel_path='arch/x86/boot/bzImage', kernel_command_line='console=ttyS0', extra_qemu_params=[])
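# --- Editor's note (not part of the tree): every qemu_configs/<arch>.py
# entry follows this shape; the file only needs to export QEMU_ARCH, which
# kunit_kernel.py loads via importlib and extends with any --qemu_args.
# A hypothetical new entry would look like the commented sketch below
# (all names are illustrative):
#
# from ..qemu_config import QemuArchParams
#
# QEMU_ARCH = QemuArchParams(linux_arch='myarch',
#                            kconfig='''
# CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_CONSOLE=y''',
#                            qemu_arch='myarch',
#                            kernel_path='arch/myarch/boot/Image',
#                            kernel_command_line='console=ttyS0',
#                            extra_qemu_params=[])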
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/x86_64.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='i386', kconfig=''' CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y''', qemu_arch='i386', kernel_path='arch/x86/boot/bzImage', kernel_command_line='console=ttyS0', extra_qemu_params=[])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/i386.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='sparc', kconfig=''' CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y''', qemu_arch='sparc', kernel_path='arch/sparc/boot/zImage', kernel_command_line='console=ttyS0 mem=256M', extra_qemu_params=['-m', '256'])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/sparc.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='arm64', kconfig=''' CONFIG_SERIAL_AMBA_PL010=y CONFIG_SERIAL_AMBA_PL010_CONSOLE=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', qemu_arch='aarch64', kernel_path='arch/arm64/boot/Image.gz', kernel_command_line='console=ttyAMA0', extra_qemu_params=['-machine', 'virt', '-cpu', 'cortex-a57'])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/arm64.py
from ..qemu_config import QemuArchParams import os import os.path import sys OPENSBI_FILE = 'opensbi-riscv64-generic-fw_dynamic.bin' OPENSBI_PATH = '/usr/share/qemu/' + OPENSBI_FILE if not os.path.isfile(OPENSBI_PATH): print('\n\nOpenSBI bios was not found in "' + OPENSBI_PATH + '".\n' 'Please ensure that qemu-system-riscv is installed, or edit the path in "qemu_configs/riscv.py"\n') sys.exit() QEMU_ARCH = QemuArchParams(linux_arch='riscv', kconfig=''' CONFIG_SOC_VIRT=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_RISCV_SBI_V01=y CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''', qemu_arch='riscv64', kernel_path='arch/riscv/boot/Image', kernel_command_line='console=ttyS0', extra_qemu_params=[ '-machine', 'virt', '-cpu', 'rv64', '-bios', OPENSBI_PATH])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/riscv.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='arm', kconfig=''' CONFIG_ARCH_VIRT=y CONFIG_SERIAL_AMBA_PL010=y CONFIG_SERIAL_AMBA_PL010_CONSOLE=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', qemu_arch='arm', kernel_path='arch/arm/boot/zImage', kernel_command_line='console=ttyAMA0', extra_qemu_params=['-machine', 'virt'])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/arm.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='s390', kconfig=''' CONFIG_EXPERT=y CONFIG_TUNE_ZEC12=y CONFIG_NUMA=y CONFIG_MODULES=y''', qemu_arch='s390x', kernel_path='arch/s390/boot/bzImage', kernel_command_line='console=ttyS0', extra_qemu_params=[ '-machine', 's390-ccw-virtio', '-cpu', 'qemu',])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/s390.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='alpha', kconfig=''' CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y''', qemu_arch='alpha', kernel_path='arch/alpha/boot/vmlinux', kernel_command_line='console=ttyS0', extra_qemu_params=[])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/alpha.py
from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='powerpc', kconfig=''' CONFIG_PPC64=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_HVC_CONSOLE=y''', qemu_arch='ppc64', kernel_path='vmlinux', kernel_command_line='console=ttyS0', extra_qemu_params=['-M', 'pseries', '-cpu', 'power8'])
grace-kernel-grace-kernel-6.1.y
tools/testing/kunit/qemu_configs/powerpc.py
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <[email protected]>
#
# Automata object: parse an automaton in dot file digraph format into a python object
#
# For further information, see:
#   Documentation/trace/rv/deterministic_automata.rst

import ntpath

class Automata:
    """Automata class: Reads a dot file and parses it as an automaton.

    Attributes:
        dot_file: A dot file with a state_automaton definition.
    """

    invalid_state_str = "INVALID_STATE"

    def __init__(self, file_path):
        self.__dot_path = file_path
        self.name = self.__get_model_name()
        self.__dot_lines = self.__open_dot()
        self.states, self.initial_state, self.final_states = self.__get_state_variables()
        self.events = self.__get_event_variables()
        self.function = self.__create_matrix()

    def __get_model_name(self):
        basename = ntpath.basename(self.__dot_path)
        if basename.endswith(".dot") == False:
            print("not a dot file")
            raise Exception("not a dot file: %s" % self.__dot_path)

        model_name = basename[0:-4]
        if model_name.__len__() == 0:
            raise Exception("not a dot file: %s" % self.__dot_path)

        return model_name

    def __open_dot(self):
        cursor = 0
        dot_lines = []

        try:
            dot_file = open(self.__dot_path)
        except:
            raise Exception("Cannot open the file: %s" % self.__dot_path)

        dot_lines = dot_file.read().splitlines()
        dot_file.close()

        # checking the first line: accept only "digraph state_automaton" headers
        line = dot_lines[cursor].split()

        if (line[0] != "digraph") or (line[1] != "state_automaton"):
            raise Exception("Not a valid .dot format: %s" % self.__dot_path)
        else:
            cursor += 1
        return dot_lines

    def __get_cursor_begin_states(self):
        cursor = 0
        while self.__dot_lines[cursor].split()[0] != "{node":
            cursor += 1
        return cursor

    def __get_cursor_begin_events(self):
        cursor = 0
        while self.__dot_lines[cursor].split()[0] != "{node":
            cursor += 1
        while self.__dot_lines[cursor].split()[0] == "{node":
            cursor += 1
        # skip initial state transition
        cursor += 1
        return cursor

    def __get_state_variables(self):
        # wait for node declaration
        states = []
        final_states = []

        has_final_states = False
        cursor = self.__get_cursor_begin_states()

        # process nodes
        while self.__dot_lines[cursor].split()[0] == "{node":
            line = self.__dot_lines[cursor].split()
            raw_state = line[-1]

            #  "enabled_fired"}; -> enabled_fired
            state = raw_state.replace('"', '').replace('};', '').replace(',','_')
            if state[0:7] == "__init_":
                initial_state = state[7:]
            else:
                states.append(state)

            if self.__dot_lines[cursor].__contains__("doublecircle") == True:
                final_states.append(state)
                has_final_states = True

            if self.__dot_lines[cursor].__contains__("ellipse") == True:
                final_states.append(state)
                has_final_states = True

            cursor += 1

        states = sorted(set(states))
        states.remove(initial_state)

        # Insert the initial state at the beginning of the states
        states.insert(0, initial_state)

        if has_final_states == False:
            final_states.append(initial_state)

        return states, initial_state, final_states

    def __get_event_variables(self):
        # here we are at the beginning of the transitions; take a note, we will return later.
        cursor = self.__get_cursor_begin_events()

        events = []
        while self.__dot_lines[cursor][1] == '"':
            # transitions have the format:
            # "all_fired" -> "both_fired" [ label = "disable_irq" ];
            #  ------------ event is here ------------^^^^^
            if self.__dot_lines[cursor].split()[1] == "->":
                line = self.__dot_lines[cursor].split()
                event = line[-2].replace('"','')

                # when a transition has more than one label, they are like this:
                # "local_irq_enable\nhw_local_irq_enable_n"
                # so split them.
                event = event.replace("\\n", " ")
                for i in event.split():
                    events.append(i)

            cursor += 1

        return sorted(set(events))

    def __create_matrix(self):
        # transform the array into a dictionary
        events = self.events
        states = self.states
        events_dict = {}
        states_dict = {}
        nr_event = 0
        for event in events:
            events_dict[event] = nr_event
            nr_event += 1

        nr_state = 0
        for state in states:
            states_dict[state] = nr_state
            nr_state += 1

        # declare the matrix....
        matrix = [[ self.invalid_state_str for x in range(nr_event)] for y in range(nr_state)]

        # and we are back! Let's fill the matrix
        cursor = self.__get_cursor_begin_events()

        while self.__dot_lines[cursor][1] == '"':
            if self.__dot_lines[cursor].split()[1] == "->":
                line = self.__dot_lines[cursor].split()
                origin_state = line[0].replace('"','').replace(',','_')
                dest_state = line[2].replace('"','').replace(',','_')
                possible_events = line[-2].replace('"','').replace("\\n", " ")
                for event in possible_events.split():
                    matrix[states_dict[origin_state]][events_dict[event]] = dest_state
            cursor += 1

        return matrix
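# --- Editor's note: a hypothetical end-to-end sketch (not part of the
# upstream file). It writes a tiny two-state model in the dot dialect this
# parser expects and loads it; the model itself ('toy', turn_on/turn_off)
# is made up for illustration.
if __name__ == '__main__':
    import os
    import tempfile

    _dot = '\n'.join([
        'digraph state_automaton {',
        '\t{node [shape = plaintext] "__init_off"};',
        '\t{node [shape = ellipse] "off"};',
        '\t{node [shape = plaintext] "on"};',
        '\t"__init_off" -> "off";',
        '\t"off" -> "on" [ label = "turn_on" ];',
        '\t"on" -> "off" [ label = "turn_off" ];',
        '\t{ rank = min ; "__init_off"; "off"; }',
        '}',
    ])
    _path = os.path.join(tempfile.mkdtemp(), 'toy.dot')
    with open(_path, 'w') as _f:
        _f.write(_dot)

    _aut = Automata(_path)
    print(_aut.states)         # ['off', 'on']
    print(_aut.initial_state)  # off
    print(_aut.events)         # ['turn_off', 'turn_on']
    print(_aut.function)       # [['INVALID_STATE', 'on'], ['off', 'INVALID_STATE']]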
grace-kernel-grace-kernel-6.1.y
tools/verification/dot2/automata.py
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <[email protected]>
#
# dot2k: transform dot files into a monitor for the Linux kernel.
#
# For further information, see:
#   Documentation/trace/rv/da_monitor_synthesis.rst

from dot2.dot2c import Dot2c
import platform
import os

class dot2k(Dot2c):
    monitor_types = { "global" : 1, "per_cpu" : 2, "per_task" : 3 }
    monitor_templates_dir = "dot2k/rv_templates/"
    monitor_type = "per_cpu"

    def __init__(self, file_path, MonitorType):
        super().__init__(file_path)

        self.monitor_type = self.monitor_types.get(MonitorType)
        if self.monitor_type == None:
            raise Exception("Unknown monitor type: %s" % MonitorType)

        self.monitor_type = MonitorType
        self.__fill_rv_templates_dir()
        self.main_c = self.__open_file(self.monitor_templates_dir + "main_" + MonitorType + ".c")
        self.enum_suffix = "_%s" % self.name

    def __fill_rv_templates_dir(self):
        if os.path.exists(self.monitor_templates_dir) == True:
            return

        if platform.system() != "Linux":
            raise Exception("I can only run on Linux.")

        kernel_path = "/lib/modules/%s/build/tools/verification/dot2/dot2k_templates/" % (platform.release())

        if os.path.exists(kernel_path) == True:
            self.monitor_templates_dir = kernel_path
            return

        if os.path.exists("/usr/share/dot2/dot2k_templates/") == True:
            self.monitor_templates_dir = "/usr/share/dot2/dot2k_templates/"
            return

        raise Exception("Could not find the template directory, do you have the kernel source installed?")

    def __open_file(self, path):
        try:
            fd = open(path)
        except OSError:
            raise Exception("Cannot open the file: %s" % path)

        content = fd.read()

        return content

    def __buff_to_string(self, buff):
        string = ""

        for line in buff:
            string = string + line + "\n"

        # cut off the last \n
        return string[:-1]

    def fill_tracepoint_handlers_skel(self):
        buff = []
        for event in self.events:
            buff.append("static void handle_%s(void *data, /* XXX: fill header */)" % event)
            buff.append("{")
            if self.monitor_type == "per_task":
                buff.append("\tstruct task_struct *p = /* XXX: how do I get p? */;");
                buff.append("\tda_handle_event_%s(p, %s%s);" % (self.name, event, self.enum_suffix));
            else:
                buff.append("\tda_handle_event_%s(%s%s);" % (self.name, event, self.enum_suffix));
            buff.append("}")
            buff.append("")
        return self.__buff_to_string(buff)

    def fill_tracepoint_attach_probe(self):
        buff = []
        for event in self.events:
            buff.append("\trv_attach_trace_probe(\"%s\", /* XXX: tracepoint */, handle_%s);" % (self.name, event))
        return self.__buff_to_string(buff)

    def fill_tracepoint_detach_helper(self):
        buff = []
        for event in self.events:
            buff.append("\trv_detach_trace_probe(\"%s\", /* XXX: tracepoint */, handle_%s);" % (self.name, event))
        return self.__buff_to_string(buff)

    def fill_main_c(self):
        main_c = self.main_c
        min_type = self.get_minimun_type()
        nr_events = self.events.__len__()
        tracepoint_handlers = self.fill_tracepoint_handlers_skel()
        tracepoint_attach = self.fill_tracepoint_attach_probe()
        tracepoint_detach = self.fill_tracepoint_detach_helper()

        main_c = main_c.replace("MIN_TYPE", min_type)
        main_c = main_c.replace("MODEL_NAME", self.name)
        main_c = main_c.replace("NR_EVENTS", str(nr_events))
        main_c = main_c.replace("TRACEPOINT_HANDLERS_SKEL", tracepoint_handlers)
        main_c = main_c.replace("TRACEPOINT_ATTACH", tracepoint_attach)
        main_c = main_c.replace("TRACEPOINT_DETACH", tracepoint_detach)

        return main_c

    def fill_model_h_header(self):
        buff = []
        buff.append("/*")
        buff.append(" * Automatically generated C representation of %s automaton" % (self.name))
        buff.append(" * For further information about this format, see kernel documentation:")
        buff.append(" *   Documentation/trace/rv/deterministic_automata.rst")
        buff.append(" */")
        buff.append("")

        return buff

    def fill_model_h(self):
        #
        # Adjust the definition names
        #
        self.enum_states_def = "states_%s" % self.name
        self.enum_events_def = "events_%s" % self.name
        self.struct_automaton_def = "automaton_%s" % self.name
        self.var_automaton_def = "automaton_%s" % self.name

        buff = self.fill_model_h_header()
        buff += self.format_model()

        return self.__buff_to_string(buff)

    def __create_directory(self):
        try:
            os.mkdir(self.name)
        except FileExistsError:
            return
        except:
            print("Failed to create the output dir: %s" % self.name)

    def __create_file(self, file_name, content):
        path = "%s/%s" % (self.name, file_name)
        try:
            file = open(path, 'w')
        except FileExistsError:
            return
        except:
            print("Failed to create file: %s" % path)
            # bail out: 'file' was never opened, so it cannot be written
            return

        file.write(content)
        file.close()

    def __get_main_name(self):
        path = "%s/%s" % (self.name, "main.c")
        if os.path.exists(path) == False:
            return "main.c"
        return "__main.c"

    def print_files(self):
        main_c = self.fill_main_c()
        model_h = self.fill_model_h()

        self.__create_directory()

        path = "%s.c" % self.name
        self.__create_file(path, main_c)

        path = "%s.h" % self.name
        self.__create_file(path, model_h)
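# --- Editor's note: a minimal usage sketch (not part of the upstream file),
# mirroring how the dot2k command-line wrapper drives this class: point it
# at an RV model in dot format plus one of the monitor_types keys, then emit
# the monitor skeleton (it needs the dot2k templates installed, see
# __fill_rv_templates_dir above).
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 3:
        sys.exit("usage: dot2k.py <model.dot> <global|per_cpu|per_task>")
    monitor = dot2k(sys.argv[1], sys.argv[2])
    monitor.print_files()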
grace-kernel-grace-kernel-6.1.y
tools/verification/dot2/dot2k.py
#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <[email protected]> # # dot2c: parse an automata in dot file digraph format into a C # # This program was written in the development of this paper: # de Oliveira, D. B. and Cucinotta, T. and de Oliveira, R. S. # "Efficient Formal Verification for the Linux Kernel." International # Conference on Software Engineering and Formal Methods. Springer, Cham, 2019. # # For further information, see: # Documentation/trace/rv/deterministic_automata.rst from dot2.automata import Automata class Dot2c(Automata): enum_suffix = "" enum_states_def = "states" enum_events_def = "events" struct_automaton_def = "automaton" var_automaton_def = "aut" def __init__(self, file_path): super().__init__(file_path) self.line_length = 100 def __buff_to_string(self, buff): string = "" for line in buff: string = string + line + "\n" # cut off the last \n return string[:-1] def __get_enum_states_content(self): buff = [] buff.append("\t%s%s = 0," % (self.initial_state, self.enum_suffix)) for state in self.states: if state != self.initial_state: buff.append("\t%s%s," % (state, self.enum_suffix)) buff.append("\tstate_max%s" % (self.enum_suffix)) return buff def get_enum_states_string(self): buff = self.__get_enum_states_content() return self.__buff_to_string(buff) def format_states_enum(self): buff = [] buff.append("enum %s {" % self.enum_states_def) buff.append(self.get_enum_states_string()) buff.append("};\n") return buff def __get_enum_events_content(self): buff = [] first = True for event in self.events: if first: buff.append("\t%s%s = 0," % (event, self.enum_suffix)) first = False else: buff.append("\t%s%s," % (event, self.enum_suffix)) buff.append("\tevent_max%s" % self.enum_suffix) return buff def get_enum_events_string(self): buff = self.__get_enum_events_content() return self.__buff_to_string(buff) def format_events_enum(self): buff = [] buff.append("enum %s {" % self.enum_events_def) buff.append(self.get_enum_events_string()) buff.append("};\n") return buff def get_minimun_type(self): min_type = "unsigned char" if self.states.__len__() > 255: min_type = "unsigned short" if self.states.__len__() > 65535: min_type = "unsigned int" if self.states.__len__() > 1000000: raise Exception("Too many states: %d" % self.states.__len__()) return min_type def format_automaton_definition(self): min_type = self.get_minimun_type() buff = [] buff.append("struct %s {" % self.struct_automaton_def) buff.append("\tchar *state_names[state_max%s];" % (self.enum_suffix)) buff.append("\tchar *event_names[event_max%s];" % (self.enum_suffix)) buff.append("\t%s function[state_max%s][event_max%s];" % (min_type, self.enum_suffix, self.enum_suffix)) buff.append("\t%s initial_state;" % min_type) buff.append("\tbool final_states[state_max%s];" % (self.enum_suffix)) buff.append("};\n") return buff def format_aut_init_header(self): buff = [] buff.append("static struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def)) return buff def __get_string_vector_per_line_content(self, buff): first = True string = "" for entry in buff: if first: string = string + "\t\t\"" + entry first = False; else: string = string + "\",\n\t\t\"" + entry string = string + "\"" return string def get_aut_init_events_string(self): return self.__get_string_vector_per_line_content(self.events) def get_aut_init_states_string(self): return self.__get_string_vector_per_line_content(self.states) def format_aut_init_events_string(self): buff 
= [] buff.append("\t.event_names = {") buff.append(self.get_aut_init_events_string()) buff.append("\t},") return buff def format_aut_init_states_string(self): buff = [] buff.append("\t.state_names = {") buff.append(self.get_aut_init_states_string()) buff.append("\t},") return buff def __get_max_strlen_of_states(self): max_state_name = max(self.states, key = len).__len__() return max(max_state_name, self.invalid_state_str.__len__()) def __get_state_string_length(self): maxlen = self.__get_max_strlen_of_states() + self.enum_suffix.__len__() return "%" + str(maxlen) + "s" def get_aut_init_function(self): nr_states = self.states.__len__() nr_events = self.events.__len__() buff = [] strformat = self.__get_state_string_length() for x in range(nr_states): line = "\t\t{ " for y in range(nr_events): next_state = self.function[x][y] if next_state != self.invalid_state_str: next_state = self.function[x][y] + self.enum_suffix if y != nr_events-1: line = line + strformat % next_state + ", " else: line = line + strformat % next_state + " }," buff.append(line) return self.__buff_to_string(buff) def format_aut_init_function(self): buff = [] buff.append("\t.function = {") buff.append(self.get_aut_init_function()) buff.append("\t},") return buff def get_aut_init_initial_state(self): return self.initial_state def format_aut_init_initial_state(self): buff = [] initial_state = self.get_aut_init_initial_state() buff.append("\t.initial_state = " + initial_state + self.enum_suffix + ",") return buff def get_aut_init_final_states(self): line = "" first = True for state in self.states: if first == False: line = line + ', ' else: first = False if self.final_states.__contains__(state): line = line + '1' else: line = line + '0' return line def format_aut_init_final_states(self): buff = [] buff.append("\t.final_states = { %s }," % self.get_aut_init_final_states()) return buff def __get_automaton_initialization_footer_string(self): footer = "};\n" return footer def format_aut_init_footer(self): buff = [] buff.append(self.__get_automaton_initialization_footer_string()) return buff def format_invalid_state(self): buff = [] buff.append("#define %s state_max%s\n" % (self.invalid_state_str, self.enum_suffix)) return buff def format_model(self): buff = [] buff += self.format_states_enum() buff += self.format_invalid_state() buff += self.format_events_enum() buff += self.format_automaton_definition() buff += self.format_aut_init_header() buff += self.format_aut_init_states_string() buff += self.format_aut_init_events_string() buff += self.format_aut_init_function() buff += self.format_aut_init_initial_state() buff += self.format_aut_init_final_states() buff += self.format_aut_init_footer() return buff def print_model_classic(self): buff = self.format_model() print(self.__buff_to_string(buff))
grace-kernel-grace-kernel-6.1.y
tools/verification/dot2/dot2c.py
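A minimal standalone sketch (not part of dot2c.py) of how the state-enum emission in the record above behaves for a toy automaton. The two states, the initial state, and the "_wip" suffix are made-up stand-ins for what the Automata base class and enum_suffix normally provide.

# Hypothetical two-state automaton; dot2c would read these from a .dot file.
states = ["preemptive", "non_preemptive"]
initial_state = "preemptive"
suffix = "_wip"  # stand-in for enum_suffix

# Mirror __get_enum_states_content(): initial state first, then the rest,
# then the state_max sentinel used to size the C arrays.
lines = ["\t%s%s = 0," % (initial_state, suffix)]
lines += ["\t%s%s," % (s, suffix) for s in states if s != initial_state]
lines.append("\tstate_max%s" % suffix)
print("enum states {\n" + "\n".join(lines) + "\n};")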
#!/usr/bin/env drgn
# SPDX-License-Identifier: GPL-2.0+
#
# Dump out the number of RCU callbacks outstanding.
#
# On older kernels having multiple flavors of RCU, this dumps out the
# number of callbacks for the most heavily used flavor.
#
# Usage: sudo drgn rcu-cbs.py
#
# Copyright (C) 2021 Facebook, Inc.
#
# Authors: Paul E. McKenney <[email protected]>

import sys
import drgn
from drgn import NULL, Object
from drgn.helpers.linux import *

def get_rdp0(prog):
    try:
        rdp0 = prog.variable('rcu_preempt_data', 'kernel/rcu/tree.c')
    except LookupError:
        rdp0 = NULL
    if rdp0 == NULL:
        try:
            rdp0 = prog.variable('rcu_sched_data', 'kernel/rcu/tree.c')
        except LookupError:
            rdp0 = NULL
    if rdp0 == NULL:
        rdp0 = prog.variable('rcu_data', 'kernel/rcu/tree.c')
    return rdp0.address_of_()

rdp0 = get_rdp0(prog)

# Sum up RCU callbacks, avoiding the builtin names sum() and len().
total = 0
for cpu in for_each_possible_cpu(prog):
    rdp = per_cpu_ptr(rdp0, cpu)
    cblen = rdp.cblist.len.value_()
    # print("CPU " + str(cpu) + " RCU callbacks: " + str(cblen))
    total += cblen
print("Number of RCU callbacks in flight: " + str(total))
grace-kernel-grace-kernel-6.1.y
tools/rcu/rcu-cbs.py
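The get_rdp0() helper above works by trying progressively older symbol names until one resolves. A self-contained sketch of that fallback idiom, with a plain dict standing in for drgn's prog.variable(); KeyError is a subclass of LookupError, so the same except clause applies.

def first_defined(lookup, names):
    # Return the value for the first name that resolves, as in get_rdp0().
    for name in names:
        try:
            return lookup(name)
        except LookupError:
            continue
    raise LookupError("none of %r found" % (names,))

symbols = {"rcu_data": "per-CPU rcu_data"}  # made-up symbol table
print(first_defined(symbols.__getitem__,
                    ["rcu_preempt_data", "rcu_sched_data", "rcu_data"]))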
#!/usr/bin/env python3 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) """Convert directories of JSON events to C code.""" import argparse import csv import json import os import sys from typing import (Callable, Dict, Optional, Sequence, Set, Tuple) import collections # Global command line arguments. _args = None # List of event tables generated from "/sys" directories. _sys_event_tables = [] # Map from an event name to an architecture standard # JsonEvent. Architecture standard events are in json files in the top # f'{_args.starting_dir}/{_args.arch}' directory. _arch_std_events = {} # Track whether an events table is currently being defined and needs closing. _close_table = False # Events to write out when the table is closed _pending_events = [] # Global BigCString shared by all structures. _bcs = None # Order specific JsonEvent attributes will be visited. _json_event_attributes = [ # cmp_sevent related attributes. 'name', 'pmu', 'topic', 'desc', 'metric_name', 'metric_group', # Seems useful, put it early. 'event', # Short things in alphabetical order. 'aggr_mode', 'compat', 'deprecated', 'perpkg', 'unit', # Longer things (the last won't be iterated over during decompress). 'metric_constraint', 'metric_expr', 'long_desc' ] def removesuffix(s: str, suffix: str) -> str: """Remove the suffix from a string The removesuffix function is added to str in Python 3.9. We aim for 3.6 compatibility and so provide our own function here. """ return s[0:-len(suffix)] if s.endswith(suffix) else s def file_name_to_table_name(parents: Sequence[str], dirname: str) -> str: """Generate a C table name from directory names.""" tblname = 'pme' for p in parents: tblname += '_' + p tblname += '_' + dirname return tblname.replace('-', '_') def c_len(s: str) -> int: """Return the length of s a C string This doesn't handle all escape characters properly. It first assumes all \ are for escaping, it then adjusts as it will have over counted \\. The code uses \000 rather than \0 as a terminator as an adjacent number would be folded into a string of \0 (ie. "\0" + "5" doesn't equal a terminator followed by the number 5 but the escape of \05). The code adjusts for \000 but not properly for all octal, hex or unicode values. """ try: utf = s.encode(encoding='utf-8',errors='strict') except: print(f'broken string {s}') raise return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2) class BigCString: """A class to hold many strings concatenated together. Generating a large number of stand-alone C strings creates a large number of relocations in position independent code. The BigCString is a helper for this case. It builds a single string which within it are all the other C strings (to avoid memory issues the string itself is held as a list of strings). The offsets within the big string are recorded and when stored to disk these don't need relocation. To reduce the size of the string further, identical strings are merged. If a longer string ends-with the same value as a shorter string, these entries are also merged. """ strings: Set[str] big_string: Sequence[str] offsets: Dict[str, int] def __init__(self): self.strings = set() def add(self, s: str) -> None: """Called to add to the big string.""" self.strings.add(s) def compute(self) -> None: """Called once all strings are added to compute the string and offsets.""" folded_strings = {} # Determine if two strings can be folded, ie. let 1 string use the # end of another. First reverse all strings and sort them. 
sorted_reversed_strings = sorted([x[::-1] for x in self.strings]) # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward # for each string to see if there is a better candidate to fold it # into, in the example rather than using 'yz' we can use'xyz' at # an offset of 1. We record which string can be folded into which # in folded_strings, we don't need to record the offset as it is # trivially computed from the string lengths. for pos,s in enumerate(sorted_reversed_strings): best_pos = pos for check_pos in range(pos + 1, len(sorted_reversed_strings)): if sorted_reversed_strings[check_pos].startswith(s): best_pos = check_pos else: break if pos != best_pos: folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1] # Compute reverse mappings for debugging. fold_into_strings = collections.defaultdict(set) for key, val in folded_strings.items(): if key != val: fold_into_strings[val].add(key) # big_string_offset is the current location within the C string # being appended to - comments, etc. don't count. big_string is # the string contents represented as a list. Strings are immutable # in Python and so appending to one causes memory issues, while # lists are mutable. big_string_offset = 0 self.big_string = [] self.offsets = {} # Emit all strings that aren't folded in a sorted manner. for s in sorted(self.strings): if s not in folded_strings: self.offsets[s] = big_string_offset self.big_string.append(f'/* offset={big_string_offset} */ "') self.big_string.append(s) self.big_string.append('"') if s in fold_into_strings: self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */') self.big_string.append('\n') big_string_offset += c_len(s) continue # Compute the offsets of the folded strings. for s in folded_strings.keys(): assert s not in self.offsets folded_s = folded_strings[s] self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s) _bcs = BigCString() class JsonEvent: """Representation of an event loaded from a json file dictionary.""" def __init__(self, jd: dict): """Constructor passed the dictionary of parsed json values.""" def llx(x: int) -> str: """Convert an int to a string similar to a printf modifier of %#llx.""" return '0' if x == 0 else hex(x) def fixdesc(s: str) -> str: """Fix formatting issue for the desc string.""" if s is None: return None return removesuffix(removesuffix(removesuffix(s, '. '), '. 
'), '.').replace('\n', '\\n').replace( '\"', '\\"').replace('\r', '\\r') def convert_aggr_mode(aggr_mode: str) -> Optional[str]: """Returns the aggr_mode_class enum value associated with the JSON string.""" if not aggr_mode: return None aggr_mode_to_enum = { 'PerChip': '1', 'PerCore': '2', } return aggr_mode_to_enum[aggr_mode] def lookup_msr(num: str) -> Optional[str]: """Converts the msr number, or first in a list to the appropriate event field.""" if not num: return None msrmap = { 0x3F6: 'ldlat=', 0x1A6: 'offcore_rsp=', 0x1A7: 'offcore_rsp=', 0x3F7: 'frontend=', } return msrmap[int(num.split(',', 1)[0], 0)] def real_event(name: str, event: str) -> Optional[str]: """Convert well known event names to an event string otherwise use the event argument.""" fixed = { 'inst_retired.any': 'event=0xc0,period=2000003', 'inst_retired.any_p': 'event=0xc0,period=2000003', 'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003', 'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003', 'cpu_clk_unhalted.core': 'event=0x3c,period=2000003', 'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003', } if not name: return None if name.lower() in fixed: return fixed[name.lower()] return event def unit_to_pmu(unit: str) -> Optional[str]: """Convert a JSON Unit to Linux PMU name.""" if not unit: return None # Comment brought over from jevents.c: # it's not realistic to keep adding these, we need something more scalable ... table = { 'CBO': 'uncore_cbox', 'QPI LL': 'uncore_qpi', 'SBO': 'uncore_sbox', 'iMPH-U': 'uncore_arb', 'CPU-M-CF': 'cpum_cf', 'CPU-M-SF': 'cpum_sf', 'PAI-CRYPTO' : 'pai_crypto', 'UPI LL': 'uncore_upi', 'hisi_sicl,cpa': 'hisi_sicl,cpa', 'hisi_sccl,ddrc': 'hisi_sccl,ddrc', 'hisi_sccl,hha': 'hisi_sccl,hha', 'hisi_sccl,l3c': 'hisi_sccl,l3c', 'imx8_ddr': 'imx8_ddr', 'L3PMC': 'amd_l3', 'DFPMC': 'amd_df', 'cpu_core': 'cpu_core', 'cpu_atom': 'cpu_atom', } return table[unit] if unit in table else f'uncore_{unit.lower()}' eventcode = 0 if 'EventCode' in jd: eventcode = int(jd['EventCode'].split(',', 1)[0], 0) if 'ExtSel' in jd: eventcode |= int(jd['ExtSel']) << 8 configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None self.name = jd['EventName'].lower() if 'EventName' in jd else None self.topic = '' self.compat = jd.get('Compat') self.desc = fixdesc(jd.get('BriefDescription')) self.long_desc = fixdesc(jd.get('PublicDescription')) precise = jd.get('PEBS') msr = lookup_msr(jd.get('MSRIndex')) msrval = jd.get('MSRValue') extra_desc = '' if 'Data_LA' in jd: extra_desc += ' Supports address when precise' if 'Errata' in jd: extra_desc += '.' 
if 'Errata' in jd: extra_desc += ' Spec update: ' + jd['Errata'] self.pmu = unit_to_pmu(jd.get('Unit')) filter = jd.get('Filter') self.unit = jd.get('ScaleUnit') self.perpkg = jd.get('PerPkg') self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode')) self.deprecated = jd.get('Deprecated') self.metric_name = jd.get('MetricName') self.metric_group = jd.get('MetricGroup') self.metric_constraint = jd.get('MetricConstraint') self.metric_expr = jd.get('MetricExpr') if self.metric_expr: self.metric_expr = self.metric_expr.replace('\\', '\\\\') arch_std = jd.get('ArchStdEvent') if precise and self.desc and '(Precise Event)' not in self.desc: extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise ' 'event)') event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}' event_fields = [ ('AnyThread', 'any='), ('PortMask', 'ch_mask='), ('CounterMask', 'cmask='), ('EdgeDetect', 'edge='), ('FCMask', 'fc_mask='), ('Invert', 'inv='), ('SampleAfterValue', 'period='), ('UMask', 'umask='), ] for key, value in event_fields: if key in jd and jd[key] != '0': event += ',' + value + jd[key] if filter: event += f',{filter}' if msr: event += f',{msr}{msrval}' if self.desc and extra_desc: self.desc += extra_desc if self.long_desc and extra_desc: self.long_desc += extra_desc if self.pmu: if self.desc and not self.desc.endswith('. '): self.desc += '. ' self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ') if arch_std and arch_std.lower() in _arch_std_events: event = _arch_std_events[arch_std.lower()].event # Copy from the architecture standard event to self for undefined fields. for attr, value in _arch_std_events[arch_std.lower()].__dict__.items(): if hasattr(self, attr) and not getattr(self, attr): setattr(self, attr, value) self.event = real_event(self.name, event) def __repr__(self) -> str: """String representation primarily for debugging.""" s = '{\n' for attr, value in self.__dict__.items(): if value: s += f'\t{attr} = {value},\n' return s + '}' def build_c_string(self) -> str: s = '' for attr in _json_event_attributes: x = getattr(self, attr) s += f'{x}\\000' if x else '\\000' return s def to_c_string(self) -> str: """Representation of the event as a C struct initializer.""" s = self.build_c_string() return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n' def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]: """Read json events from the specified file.""" try: result = json.load(open(path), object_hook=JsonEvent) except BaseException as err: print(f"Exception processing {path}") raise for event in result: event.topic = topic return result def preprocess_arch_std_files(archpath: str) -> None: """Read in all architecture standard events.""" global _arch_std_events for item in os.scandir(archpath): if item.is_file() and item.name.endswith('.json'): for event in read_json_events(item.path, topic=''): if event.name: _arch_std_events[event.name.lower()] = event def print_events_table_prefix(tblname: str) -> None: """Called when a new events table is started.""" global _close_table if _close_table: raise IOError('Printing table prefix but last table has no suffix') _args.output_file.write(f'static const struct compact_pmu_event {tblname}[] = {{\n') _close_table = True def add_events_table_entries(item: os.DirEntry, topic: str) -> None: """Add contents of file to _pending_events table.""" if not _close_table: raise IOError('Table entries missing prefix') for e in read_json_events(item.path, topic): _pending_events.append(e) def 
print_events_table_suffix() -> None: """Optionally close events table.""" def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]: def fix_none(s: Optional[str]) -> str: if s is None: return '' return s return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu), fix_none(j.metric_name)) global _close_table if not _close_table: return global _pending_events for event in sorted(_pending_events, key=event_cmp_key): _args.output_file.write(event.to_c_string()) _pending_events = [] _args.output_file.write('};\n\n') _close_table = False def get_topic(topic: str) -> str: if topic.endswith('metrics.json'): return 'metrics' return removesuffix(topic, '.json').replace('-', ' ') def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None: if item.is_dir(): return # base dir or too deep level = len(parents) if level == 0 or level > 4: return # Ignore other directories. If the file name does not have a .json # extension, ignore it. It could be a readme.txt for instance. if not item.is_file() or not item.name.endswith('.json'): return topic = get_topic(item.name) for event in read_json_events(item.path, topic): _bcs.add(event.build_c_string()) def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None: """Process a JSON file during the main walk.""" global _sys_event_tables def is_leaf_dir(path: str) -> bool: for item in os.scandir(path): if item.is_dir(): return False return True # model directory, reset topic if item.is_dir() and is_leaf_dir(item.path): print_events_table_suffix() tblname = file_name_to_table_name(parents, item.name) if item.name == 'sys': _sys_event_tables.append(tblname) print_events_table_prefix(tblname) return # base dir or too deep level = len(parents) if level == 0 or level > 4: return # Ignore other directories. If the file name does not have a .json # extension, ignore it. It could be a readme.txt for instance. if not item.is_file() or not item.name.endswith('.json'): return add_events_table_entries(item, get_topic(item.name)) def print_mapping_table(archs: Sequence[str]) -> None: """Read the mapfile and generate the struct from cpuid string to event table.""" _args.output_file.write(""" /* Struct used to make the PMU event table implementation opaque to callers. */ struct pmu_events_table { const struct compact_pmu_event *entries; size_t length; }; /* * Map a CPU to its table of PMU events. The CPU is identified by the * cpuid field, which is an arch-specific identifier for the CPU. * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c) * * The cpuid can contain any character other than the comma. */ struct pmu_events_map { const char *arch; const char *cpuid; struct pmu_events_table table; }; /* * Global table mapping each known CPU for the architecture to its * table of PMU events. */ const struct pmu_events_map pmu_events_map[] = { """) for arch in archs: if arch == 'test': _args.output_file.write("""{ \t.arch = "testarch", \t.cpuid = "testcpu", \t.table = { \t.entries = pme_test_soc_cpu, \t.length = ARRAY_SIZE(pme_test_soc_cpu), \t} }, """) else: with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile: table = csv.reader(csvfile) first = True for row in table: # Skip the first row or any row beginning with #. 
if not first and len(row) > 0 and not row[0].startswith('#'): tblname = file_name_to_table_name([], row[2].replace('/', '_')) cpuid = row[0].replace('\\', '\\\\') _args.output_file.write(f"""{{ \t.arch = "{arch}", \t.cpuid = "{cpuid}", \t.table = {{ \t\t.entries = {tblname}, \t\t.length = ARRAY_SIZE({tblname}) \t}} }}, """) first = False _args.output_file.write("""{ \t.arch = 0, \t.cpuid = 0, \t.table = { 0, 0 }, } }; """) def print_system_mapping_table() -> None: """C struct mapping table array for tables from /sys directories.""" _args.output_file.write(""" struct pmu_sys_events { \tconst char *name; \tstruct pmu_events_table table; }; static const struct pmu_sys_events pmu_sys_event_tables[] = { """) for tblname in _sys_event_tables: _args.output_file.write(f"""\t{{ \t\t.table = {{ \t\t\t.entries = {tblname}, \t\t\t.length = ARRAY_SIZE({tblname}) \t\t}}, \t\t.name = \"{tblname}\", \t}}, """) _args.output_file.write("""\t{ \t\t.table = { 0, 0 } \t}, }; static void decompress(int offset, struct pmu_event *pe) { \tconst char *p = &big_c_string[offset]; """) for attr in _json_event_attributes: _args.output_file.write(f""" \tpe->{attr} = (*p == '\\0' ? NULL : p); """) if attr == _json_event_attributes[-1]: continue _args.output_file.write('\twhile (*p++);') _args.output_file.write("""} int pmu_events_table_for_each_event(const struct pmu_events_table *table, pmu_event_iter_fn fn, void *data) { for (size_t i = 0; i < table->length; i++) { struct pmu_event pe; int ret; decompress(table->entries[i].offset, &pe); ret = fn(&pe, table, data); if (ret) return ret; } return 0; } const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu) { const struct pmu_events_table *table = NULL; char *cpuid = perf_pmu__getcpuid(pmu); int i; /* on some platforms which uses cpus map, cpuid can be NULL for * PMUs other than CORE PMUs. 
*/ if (!cpuid) return NULL; i = 0; for (;;) { const struct pmu_events_map *map = &pmu_events_map[i++]; if (!map->arch) break; if (!strcmp_cpuid_str(map->cpuid, cpuid)) { table = &map->table; break; } } free(cpuid); return table; } const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid) { for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) { if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid)) return &tables->table; } return NULL; } int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data) { for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) { int ret = pmu_events_table_for_each_event(&tables->table, fn, data); if (ret) return ret; } return 0; } const struct pmu_events_table *find_sys_events_table(const char *name) { for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; tables->name; tables++) { if (!strcmp(tables->name, name)) return &tables->table; } return NULL; } int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data) { for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; tables->name; tables++) { int ret = pmu_events_table_for_each_event(&tables->table, fn, data); if (ret) return ret; } return 0; } """) def main() -> None: global _args def dir_path(path: str) -> str: """Validate path is a directory for argparse.""" if os.path.isdir(path): return path raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory') def ftw(path: str, parents: Sequence[str], action: Callable[[Sequence[str], os.DirEntry], None]) -> None: """Replicate the directory/file walking behavior of C's file tree walk.""" for item in os.scandir(path): action(parents, item) if item.is_dir(): ftw(item.path, parents + [item.name], action) ap = argparse.ArgumentParser() ap.add_argument('arch', help='Architecture name like x86') ap.add_argument( 'starting_dir', type=dir_path, help='Root of tree containing architecture directories containing json files' ) ap.add_argument( 'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout) _args = ap.parse_args() _args.output_file.write(""" #include "pmu-events/pmu-events.h" #include "util/header.h" #include "util/pmu.h" #include <string.h> #include <stddef.h> struct compact_pmu_event { int offset; }; """) archs = [] for item in os.scandir(_args.starting_dir): if not item.is_dir(): continue if item.name == _args.arch or _args.arch == 'all' or item.name == 'test': archs.append(item.name) if len(archs) < 2: raise IOError(f'Missing architecture directory \'{_args.arch}\'') archs.sort() for arch in archs: arch_path = f'{_args.starting_dir}/{arch}' preprocess_arch_std_files(arch_path) ftw(arch_path, [], preprocess_one_file) _bcs.compute() _args.output_file.write('static const char *const big_c_string =\n') for s in _bcs.big_string: _args.output_file.write(s) _args.output_file.write(';\n\n') for arch in archs: arch_path = f'{_args.starting_dir}/{arch}' ftw(arch_path, [], process_one_file) print_events_table_suffix() print_mapping_table(archs) print_system_mapping_table() if __name__ == '__main__': main()
grace-kernel-grace-kernel-6.1.y
tools/perf/pmu-events/jevents.py
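A self-contained sketch of the suffix-folding trick BigCString.compute() uses above: reverse the strings and sort them, and any string that is a suffix of another lands next to an extension of itself, so it can reuse the longer string's tail at offset len(long) - len(short). This is simplified relative to the real code, which scans ahead for the best candidate; here only the immediate neighbour is checked.

strings = {"xyz", "yz", "abc"}
rev = sorted(s[::-1] for s in strings)   # ['cba', 'zy', 'zyx']
folded = {}
for pos, s in enumerate(rev):
    nxt = rev[pos + 1] if pos + 1 < len(rev) else None
    if nxt is not None and nxt.startswith(s):
        folded[s[::-1]] = nxt[::-1]      # 'yz' can live inside 'xyz'
for short, long_ in folded.items():
    print("%r -> %r at offset %d" % (short, long_, len(long_) - len(short)))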
from os import getenv, path
from subprocess import Popen, PIPE
from re import sub

cc = getenv("CC")
cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
src_feature_tests = getenv('srctree') + '/tools/build/feature'

def clang_has_option(option):
    cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
    return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]

if cc_is_clang:
    from sysconfig import get_config_vars
    vars = get_config_vars()
    for var in ('CFLAGS', 'OPT'):
        vars[var] = sub("-specs=[^ ]+", "", vars[var])
        if not clang_has_option("-mcet"):
            vars[var] = sub("-mcet", "", vars[var])
        if not clang_has_option("-fcf-protection"):
            vars[var] = sub("-fcf-protection", "", vars[var])
        if not clang_has_option("-fstack-clash-protection"):
            vars[var] = sub("-fstack-clash-protection", "", vars[var])
        if not clang_has_option("-fstack-protector-strong"):
            vars[var] = sub("-fstack-protector-strong", "", vars[var])
        if not clang_has_option("-fno-semantic-interposition"):
            vars[var] = sub("-fno-semantic-interposition", "", vars[var])
        if not clang_has_option("-ffat-lto-objects"):
            vars[var] = sub("-ffat-lto-objects", "", vars[var])

from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.install_lib import install_lib as _install_lib

class build_ext(_build_ext):
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp

class install_lib(_install_lib):
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib

cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls', '-DPYTHON_PERF' ]
if cc_is_clang:
    cflags += ["-Wno-unused-command-line-argument" ]
else:
    cflags += ['-Wno-cast-function-type' ]

src_perf = getenv('srctree') + '/tools/perf'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPI')
libperf = getenv('LIBPERF')

ext_sources = [f.strip() for f in open('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

# use full paths with source files
ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources))

extra_libraries = []
if '-DHAVE_LIBNUMA_SUPPORT' in cflags:
    extra_libraries = [ 'numa' ]
if '-DHAVE_LIBCAP_SUPPORT' in cflags:
    extra_libraries += [ 'cap' ]

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 libraries = extra_libraries,
                 extra_compile_args = cflags,
                 extra_objects = [libtraceevent, libapikfs, libperf],
                )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='[email protected]',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
grace-kernel-grace-kernel-6.1.y
tools/perf/util/setup.py
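A hedged sketch of the flag-scrubbing pattern in the clang branch above, pulled out into a reusable helper. scrub_flags and the probe lambda are illustrative stand-ins, not part of setup.py; the real probe is clang_has_option(), which compiles a test file.

from re import sub

def scrub_flags(cflags, candidates, supported):
    # Drop each candidate flag the probe reports as unsupported,
    # the way setup.py edits CFLAGS/OPT when building under clang.
    for flag in candidates:
        if not supported(flag):
            cflags = sub(flag, "", cflags)
    return cflags

ok = {"-fcf-protection"}  # pretend only this flag is supported
print(scrub_flags("-O2 -mcet -fcf-protection", ["-mcet", "-fcf-protection"],
                  lambda f: f in ok))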
#! /usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#

import perf

def main(context_switch = 0, thread = -1):
    cpus = perf.cpu_map()
    threads = perf.thread_map(thread)
    evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
                       config = perf.COUNT_SW_DUMMY,
                       task = 1, comm = 1, mmap = 0, freq = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       context_switch = context_switch,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    """What we want are just the PERF_RECORD_ lifetime events for threads.
       Using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES with
       freq=1 (the default), makes perf reenable irq_vectors:local_timer_entry
       when disabling nohz, which is not good for use cases where all we want
       is to see threads coming and going.

       So use (perf.TYPE_SOFTWARE, perf.COUNT_SW_DUMMY, freq=0) instead."""
    evsel.open(cpus = cpus, threads = threads)
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print("cpu: {0}, pid: {1}, tid: {2} {3}".format(event.sample_cpu,
                                                            event.sample_pid,
                                                            event.sample_tid,
                                                            event))

if __name__ == '__main__':
    """
    To test the PERF_RECORD_SWITCH record, pick a pid and replace
    in the following line.

    Example output:

cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 }
cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 }
cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 }
cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 }

    It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
    to figure out if this is a context switch in or out of the monitored threads.

    If bored, please add command line option parsing support for these options :-)
    """
    # main(context_switch = 1, thread = 31463)
    main()
grace-kernel-grace-kernel-6.1.y
tools/perf/python/twatch.py
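The closing docstring above invites command line option parsing for these knobs; a minimal sketch with argparse follows. The flag names are hypothetical, not part of twatch.py.

import argparse

def parse_args():
    ap = argparse.ArgumentParser(description="watch thread lifetime events")
    ap.add_argument("--context-switch", type=int, default=0,
                    help="also report PERF_RECORD_SWITCH events (0 or 1)")
    ap.add_argument("--thread", type=int, default=-1,
                    help="pid to monitor, -1 for all")
    return ap.parse_args()

# args = parse_args()
# main(context_switch = args.context_switch, thread = args.thread)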
#! /usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# -*- python -*-
# -*- coding: utf-8 -*-

import perf

class tracepoint(perf.evsel):
    def __init__(self, sys, name):
        config = perf.tracepoint(sys, name)
        perf.evsel.__init__(self,
                            type = perf.TYPE_TRACEPOINT,
                            config = config,
                            freq = 0, sample_period = 1, wakeup_events = 1,
                            sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_RAW | perf.SAMPLE_TIME)

def main():
    tp = tracepoint("sched", "sched_switch")
    cpus = perf.cpu_map()
    threads = perf.thread_map(-1)

    evlist = perf.evlist(cpus, threads)
    evlist.add(tp)
    evlist.open()
    evlist.mmap()

    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            if not isinstance(event, perf.sample_event):
                continue
            print("time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
                  event.sample_time,
                  event.prev_comm,
                  event.prev_pid,
                  event.prev_prio,
                  event.prev_state,
                  event.next_comm,
                  event.next_pid,
                  event.next_prio))

if __name__ == '__main__':
    main()
grace-kernel-grace-kernel-6.1.y
tools/perf/python/tracepoint.py
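main() above is specific to sched:sched_switch, but the tracepoint class is generic. A hedged sketch reusing it for an arbitrary tracepoint; it needs the perf python binding from the file above, and which fields the resulting samples carry depends on the event's format file.

def open_tracepoint(sys_name, ev_name):
    # Same setup sequence as main(), parameterised on the tracepoint.
    tp = tracepoint(sys_name, ev_name)
    cpus = perf.cpu_map()
    threads = perf.thread_map(-1)
    evlist = perf.evlist(cpus, threads)
    evlist.add(tp)
    evlist.open()
    evlist.mmap()
    return evlist

# evlist = open_tracepoint("irq", "irq_handler_entry")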
# SPDX-License-Identifier: GPL-2.0 from __future__ import print_function import os import sys import glob import optparse import tempfile import logging import shutil try: import configparser except ImportError: import ConfigParser as configparser def data_equal(a, b): # Allow multiple values in assignment separated by '|' a_list = a.split('|') b_list = b.split('|') for a_item in a_list: for b_item in b_list: if (a_item == b_item): return True elif (a_item == '*') or (b_item == '*'): return True return False class Fail(Exception): def __init__(self, test, msg): self.msg = msg self.test = test def getMsg(self): return '\'%s\' - %s' % (self.test.path, self.msg) class Notest(Exception): def __init__(self, test, arch): self.arch = arch self.test = test def getMsg(self): return '[%s] \'%s\'' % (self.arch, self.test.path) class Unsup(Exception): def __init__(self, test): self.test = test def getMsg(self): return '\'%s\'' % self.test.path class Event(dict): terms = [ 'cpu', 'flags', 'type', 'size', 'config', 'sample_period', 'sample_type', 'read_format', 'disabled', 'inherit', 'pinned', 'exclusive', 'exclude_user', 'exclude_kernel', 'exclude_hv', 'exclude_idle', 'mmap', 'comm', 'freq', 'inherit_stat', 'enable_on_exec', 'task', 'watermark', 'precise_ip', 'mmap_data', 'sample_id_all', 'exclude_host', 'exclude_guest', 'exclude_callchain_kernel', 'exclude_callchain_user', 'wakeup_events', 'bp_type', 'config1', 'config2', 'branch_sample_type', 'sample_regs_user', 'sample_stack_user', ] def add(self, data): for key, val in data: log.debug(" %s = %s" % (key, val)) self[key] = val def __init__(self, name, data, base): log.debug(" Event %s" % name); self.name = name; self.group = '' self.add(base) self.add(data) def equal(self, other): for t in Event.terms: log.debug(" [%s] %s %s" % (t, self[t], other[t])); if t not in self or t not in other: return False if not data_equal(self[t], other[t]): return False return True def optional(self): if 'optional' in self and self['optional'] == '1': return True return False def diff(self, other): for t in Event.terms: if t not in self or t not in other: continue if not data_equal(self[t], other[t]): log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) # Test file description needs to have following sections: # [config] # - just single instance in file # - needs to specify: # 'command' - perf command name # 'args' - special command arguments # 'ret' - expected command return value (0 by default) # 'arch' - architecture specific test (optional) # comma separated list, ! at the beginning # negates it. 
# # [eventX:base] # - one or multiple instances in file # - expected values assignments class Test(object): def __init__(self, path, options): parser = configparser.SafeConfigParser() parser.read(path) log.warning("running '%s'" % path) self.path = path self.test_dir = options.test_dir self.perf = options.perf self.command = parser.get('config', 'command') self.args = parser.get('config', 'args') try: self.ret = parser.get('config', 'ret') except: self.ret = 0 try: self.arch = parser.get('config', 'arch') log.warning("test limitation '%s'" % self.arch) except: self.arch = '' self.expect = {} self.result = {} log.debug(" loading expected events"); self.load_events(path, self.expect) def is_event(self, name): if name.find("event") == -1: return False else: return True def skip_test(self, myarch): # If architecture not set always run test if self.arch == '': # log.warning("test for arch %s is ok" % myarch) return False # Allow multiple values in assignment separated by ',' arch_list = self.arch.split(',') # Handle negated list such as !s390x,ppc if arch_list[0][0] == '!': arch_list[0] = arch_list[0][1:] log.warning("excluded architecture list %s" % arch_list) for arch_item in arch_list: # log.warning("test for %s arch is %s" % (arch_item, myarch)) if arch_item == myarch: return True return False for arch_item in arch_list: # log.warning("test for architecture '%s' current '%s'" % (arch_item, myarch)) if arch_item == myarch: return False return True def load_events(self, path, events): parser_event = configparser.SafeConfigParser() parser_event.read(path) # The event record section header contains 'event' word, # optionaly followed by ':' allowing to load 'parent # event' first as a base for section in filter(self.is_event, parser_event.sections()): parser_items = parser_event.items(section); base_items = {} # Read parent event if there's any if (':' in section): base = section[section.index(':') + 1:] parser_base = configparser.SafeConfigParser() parser_base.read(self.test_dir + '/' + base) base_items = parser_base.items('event') e = Event(section, parser_items, base_items) events[section] = e def run_cmd(self, tempdir): junk1, junk2, junk3, junk4, myarch = (os.uname()) if self.skip_test(myarch): raise Notest(self, myarch) cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir, self.perf, self.command, tempdir, self.args) ret = os.WEXITSTATUS(os.system(cmd)) log.info(" '%s' ret '%s', expected '%s'" % (cmd, str(ret), str(self.ret))) if not data_equal(str(ret), str(self.ret)): raise Unsup(self) def compare(self, expect, result): match = {} log.debug(" compare"); # For each expected event find all matching # events in result. Fail if there's not any. for exp_name, exp_event in expect.items(): exp_list = [] res_event = {} log.debug(" matching [%s]" % exp_name) for res_name, res_event in result.items(): log.debug(" to [%s]" % res_name) if (exp_event.equal(res_event)): exp_list.append(res_name) log.debug(" ->OK") else: log.debug(" ->FAIL"); log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list))) # we did not any matching event - fail if not exp_list: if exp_event.optional(): log.debug(" %s does not match, but is optional" % exp_name) else: if not res_event: log.debug(" res_event is empty"); else: exp_event.diff(res_event) raise Fail(self, 'match failure'); match[exp_name] = exp_list # For each defined group in the expected events # check we match the same group in the result. 
for exp_name, exp_event in expect.items(): group = exp_event.group if (group == ''): continue for res_name in match[exp_name]: res_group = result[res_name].group if res_group not in match[group]: raise Fail(self, 'group failure') log.debug(" group: [%s] matches group leader %s" % (exp_name, str(match[group]))) log.debug(" matched") def resolve_groups(self, events): for name, event in events.items(): group_fd = event['group_fd']; if group_fd == '-1': continue; for iname, ievent in events.items(): if (ievent['fd'] == group_fd): event.group = iname log.debug('[%s] has group leader [%s]' % (name, iname)) break; def run(self): tempdir = tempfile.mkdtemp(); try: # run the test script self.run_cmd(tempdir); # load events expectation for the test log.debug(" loading result events"); for f in glob.glob(tempdir + '/event*'): self.load_events(f, self.result); # resolve group_fd to event names self.resolve_groups(self.expect); self.resolve_groups(self.result); # do the expectation - results matching - both ways self.compare(self.expect, self.result) self.compare(self.result, self.expect) finally: # cleanup shutil.rmtree(tempdir) def run_tests(options): for f in glob.glob(options.test_dir + '/' + options.test): try: Test(f, options).run() except Unsup as obj: log.warning("unsupp %s" % obj.getMsg()) except Notest as obj: log.warning("skipped %s" % obj.getMsg()) def setup_log(verbose): global log level = logging.CRITICAL if verbose == 1: level = logging.WARNING if verbose == 2: level = logging.INFO if verbose >= 3: level = logging.DEBUG log = logging.getLogger('test') log.setLevel(level) ch = logging.StreamHandler() ch.setLevel(level) formatter = logging.Formatter('%(message)s') ch.setFormatter(formatter) log.addHandler(ch) USAGE = '''%s [OPTIONS] -d dir # tests dir -p path # perf binary -t test # single test -v # verbose level ''' % sys.argv[0] def main(): parser = optparse.OptionParser(usage=USAGE) parser.add_option("-t", "--test", action="store", type="string", dest="test") parser.add_option("-d", "--test-dir", action="store", type="string", dest="test_dir") parser.add_option("-p", "--perf", action="store", type="string", dest="perf") parser.add_option("-v", "--verbose", default=0, action="count", dest="verbose") options, args = parser.parse_args() if args: parser.error('FAILED wrong arguments %s' % ' '.join(args)) return -1 setup_log(options.verbose) if not options.test_dir: print('FAILED no -d option specified') sys.exit(-1) if not options.test: options.test = 'test*' try: run_tests(options) except Fail as obj: print("FAILED %s" % obj.getMsg()) sys.exit(-1) sys.exit(0) if __name__ == '__main__': main()
grace-kernel-grace-kernel-6.1.y
tools/perf/tests/attr.py
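A self-contained behavior sketch of the data_equal() rule in the record above: '|' separates alternative values on either side of the comparison, and '*' is a wildcard.

def matches(a, b):
    # Equivalent predicate to data_equal(): any pair of alternatives that
    # is equal, or contains a '*', makes the whole comparison succeed.
    return any(x == y or x == '*' or y == '*'
               for x in a.split('|') for y in b.split('|'))

print(matches("0|1", "1"))  # True: one alternative agrees
print(matches("*", "123"))  # True: wildcard
print(matches("2", "3"))    # False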
#!/usr/bin/python
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
# Basic sanity check of perf JSON output as specified in the man page.

import argparse
import sys
import json

ap = argparse.ArgumentParser()
ap.add_argument('--no-args', action='store_true')
ap.add_argument('--interval', action='store_true')
ap.add_argument('--system-wide-no-aggr', action='store_true')
ap.add_argument('--system-wide', action='store_true')
ap.add_argument('--event', action='store_true')
ap.add_argument('--per-core', action='store_true')
ap.add_argument('--per-thread', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
args = ap.parse_args()

Lines = sys.stdin.readlines()

def isfloat(num):
    try:
        float(num)
        return True
    except ValueError:
        return False

def isint(num):
    try:
        int(num)
        return True
    except ValueError:
        return False

def is_counter_value(num):
    return isfloat(num) or num == '<not counted>' or num == '<not supported>'

def check_json_output(expected_items):
    if expected_items != -1:
        for line in Lines:
            if 'failed' not in line:
                count = line.count(',')
                if count != expected_items and count >= 1 and count <= 3 and 'metric-value' in line:
                    # Events that generate >1 metric may have isolated metric
                    # values and possibly other prefixes like interval, core and
                    # aggregate-number.
                    continue
                if count != expected_items:
                    raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
                                       f' in \'{line}\'')
    checks = {
        'aggregate-number': lambda x: isfloat(x),
        'core': lambda x: True,
        'counter-value': lambda x: is_counter_value(x),
        'cgroup': lambda x: True,
        'cpu': lambda x: isint(x),
        'die': lambda x: True,
        'event': lambda x: True,
        'event-runtime': lambda x: isfloat(x),
        'interval': lambda x: isfloat(x),
        'metric-unit': lambda x: True,
        'metric-value': lambda x: isfloat(x),
        'node': lambda x: True,
        'pcnt-running': lambda x: isfloat(x),
        'socket': lambda x: True,
        'thread': lambda x: True,
        'unit': lambda x: True,
    }
    input = '[\n' + ','.join(Lines) + '\n]'
    for item in json.loads(input):
        for key, value in item.items():
            if key not in checks:
                raise RuntimeError(f'Unexpected key: key={key} value={value}')
            if not checks[key](value):
                raise RuntimeError(f'Check failed for: key={key} value={value}')

try:
    if args.no_args or args.system_wide or args.event:
        expected_items = 6
    elif args.interval or args.per_thread or args.system_wide_no_aggr:
        expected_items = 7
    elif args.per_core or args.per_socket or args.per_node or args.per_die:
        expected_items = 8
    else:
        # If no option is specified, don't check the number of items.
        expected_items = -1
    check_json_output(expected_items)
except:
    print('Test failed for input:\n' + '\n'.join(Lines))
    raise
grace-kernel-grace-kernel-6.1.y
tools/perf/tests/shell/lib/perf_json_output_lint.py
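A tiny standalone illustration of the field-count heuristic in check_json_output() above: perf emits one JSON object per line, so the number of commas (what the linter compares against expected_items) is one less than the number of fields. The sample line below is made up.

line = '{"interval" : 1.001, "counter-value" : "1000.0", "unit" : "msec"}'
count = line.count(',')
print(count, "commas =>", count + 1, "fields")  # 2 commas => 3 fields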
# SPDX-License-Identifier: GPL-2.0 # arm-cs-trace-disasm.py: ARM CoreSight Trace Dump With Disassember # # Author: Tor Jeremiassen <[email protected]> # Mathieu Poirier <[email protected]> # Leo Yan <[email protected]> # Al Grant <[email protected]> from __future__ import print_function import os from os import path import sys import re from subprocess import * from optparse import OptionParser, make_option from perf_trace_context import perf_set_itrace_options, \ perf_sample_insn, perf_sample_srccode # Below are some example commands for using this script. # # Output disassembly with objdump: # perf script -s scripts/python/arm-cs-trace-disasm.py \ # -- -d objdump -k path/to/vmlinux # Output disassembly with llvm-objdump: # perf script -s scripts/python/arm-cs-trace-disasm.py \ # -- -d llvm-objdump-11 -k path/to/vmlinux # Output only source line and symbols: # perf script -s scripts/python/arm-cs-trace-disasm.py # Command line parsing. option_list = [ # formatting options for the bottom entry of the stack make_option("-k", "--vmlinux", dest="vmlinux_name", help="Set path to vmlinux file"), make_option("-d", "--objdump", dest="objdump_name", help="Set path to objdump executable file"), make_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Enable debugging log") ] parser = OptionParser(option_list=option_list) (options, args) = parser.parse_args() # Initialize global dicts and regular expression disasm_cache = dict() cpu_data = dict() disasm_re = re.compile("^\s*([0-9a-fA-F]+):") disasm_func_re = re.compile("^\s*([0-9a-fA-F]+)\s.*:") cache_size = 64*1024 glb_source_file_name = None glb_line_number = None glb_dso = None def get_optional(perf_dict, field): if field in perf_dict: return perf_dict[field] return "[unknown]" def get_offset(perf_dict, field): if field in perf_dict: return "+%#x" % perf_dict[field] return "" def get_dso_file_path(dso_name, dso_build_id): if (dso_name == "[kernel.kallsyms]" or dso_name == "vmlinux"): if (options.vmlinux_name): return options.vmlinux_name; else: return dso_name if (dso_name == "[vdso]") : append = "/vdso" else: append = "/elf" dso_path = os.environ['PERF_BUILDID_DIR'] + "/" + dso_name + "/" + dso_build_id + append; # Replace duplicate slash chars to single slash char dso_path = dso_path.replace('//', '/', 1) return dso_path def read_disam(dso_fname, dso_start, start_addr, stop_addr): addr_range = str(start_addr) + ":" + str(stop_addr) + ":" + dso_fname # Don't let the cache get too big, clear it when it hits max size if (len(disasm_cache) > cache_size): disasm_cache.clear(); if addr_range in disasm_cache: disasm_output = disasm_cache[addr_range]; else: start_addr = start_addr - dso_start; stop_addr = stop_addr - dso_start; disasm = [ options.objdump_name, "-d", "-z", "--start-address="+format(start_addr,"#x"), "--stop-address="+format(stop_addr,"#x") ] disasm += [ dso_fname ] disasm_output = check_output(disasm).decode('utf-8').split('\n') disasm_cache[addr_range] = disasm_output return disasm_output def print_disam(dso_fname, dso_start, start_addr, stop_addr): for line in read_disam(dso_fname, dso_start, start_addr, stop_addr): m = disasm_func_re.search(line) if m is None: m = disasm_re.search(line) if m is None: continue print("\t" + line) def print_sample(sample): print("Sample = { cpu: %04d addr: 0x%016x phys_addr: 0x%016x ip: 0x%016x " \ "pid: %d tid: %d period: %d time: %d }" % \ (sample['cpu'], sample['addr'], sample['phys_addr'], \ sample['ip'], sample['pid'], sample['tid'], \ sample['period'], 
sample['time'])) def trace_begin(): print('ARM CoreSight Trace Data Assembler Dump') def trace_end(): print('End') def trace_unhandled(event_name, context, event_fields_dict): print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])) def common_start_str(comm, sample): sec = int(sample["time"] / 1000000000) ns = sample["time"] % 1000000000 cpu = sample["cpu"] pid = sample["pid"] tid = sample["tid"] return "%16s %5u/%-5u [%04u] %9u.%09u " % (comm, pid, tid, cpu, sec, ns) # This code is copied from intel-pt-events.py for printing source code # line and symbols. def print_srccode(comm, param_dict, sample, symbol, dso): ip = sample["ip"] if symbol == "[unknown]": start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40) else: offs = get_offset(param_dict, "symoff") start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40) global glb_source_file_name global glb_line_number global glb_dso source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context) if source_file_name: if glb_line_number == line_number and glb_source_file_name == source_file_name: src_str = "" else: if len(source_file_name) > 40: src_file = ("..." + source_file_name[-37:]) + " " else: src_file = source_file_name.ljust(41) if source_line is None: src_str = src_file + str(line_number).rjust(4) + " <source not found>" else: src_str = src_file + str(line_number).rjust(4) + " " + source_line glb_dso = None elif dso == glb_dso: src_str = "" else: src_str = dso glb_dso = dso glb_line_number = line_number glb_source_file_name = source_file_name print(start_str, src_str) def process_event(param_dict): global cache_size global options sample = param_dict["sample"] comm = param_dict["comm"] name = param_dict["ev_name"] dso = get_optional(param_dict, "dso") dso_bid = get_optional(param_dict, "dso_bid") dso_start = get_optional(param_dict, "dso_map_start") dso_end = get_optional(param_dict, "dso_map_end") symbol = get_optional(param_dict, "symbol") if (options.verbose == True): print("Event type: %s" % name) print_sample(sample) # If cannot find dso so cannot dump assembler, bail out if (dso == '[unknown]'): return # Validate dso start and end addresses if ((dso_start == '[unknown]') or (dso_end == '[unknown]')): print("Failed to find valid dso map for dso %s" % dso) return if (name[0:12] == "instructions"): print_srccode(comm, param_dict, sample, symbol, dso) return # Don't proceed if this event is not a branch sample, . if (name[0:8] != "branches"): return cpu = sample["cpu"] ip = sample["ip"] addr = sample["addr"] # Initialize CPU data if it's empty, and directly return back # if this is the first tracing event for this CPU. if (cpu_data.get(str(cpu) + 'addr') == None): cpu_data[str(cpu) + 'addr'] = addr return # The format for packet is: # # +------------+------------+------------+ # sample_prev: | addr | ip | cpu | # +------------+------------+------------+ # sample_next: | addr | ip | cpu | # +------------+------------+------------+ # # We need to combine the two continuous packets to get the instruction # range for sample_prev::cpu: # # [ sample_prev::addr .. sample_next::ip ] # # For this purose, sample_prev::addr is stored into cpu_data structure # and read back for 'start_addr' when the new packet comes, and we need # to use sample_next::ip to calculate 'stop_addr', plusing extra 4 for # 'stop_addr' is for the sake of objdump so the final assembler dump can # include last instruction for sample_next::ip. 
start_addr = cpu_data[str(cpu) + 'addr'] stop_addr = ip + 4 # Record for previous sample packet cpu_data[str(cpu) + 'addr'] = addr # Handle CS_ETM_TRACE_ON packet if start_addr=0 and stop_addr=4 if (start_addr == 0 and stop_addr == 4): print("CPU%d: CS_ETM_TRACE_ON packet is inserted" % cpu) return if (start_addr < int(dso_start) or start_addr > int(dso_end)): print("Start address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (start_addr, int(dso_start), int(dso_end), dso)) return if (stop_addr < int(dso_start) or stop_addr > int(dso_end)): print("Stop address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (stop_addr, int(dso_start), int(dso_end), dso)) return if (options.objdump_name != None): # It doesn't need to decrease virtual memory offset for disassembly # for kernel dso, so in this case we set vm_start to zero. if (dso == "[kernel.kallsyms]"): dso_vm_start = 0 else: dso_vm_start = int(dso_start) dso_fname = get_dso_file_path(dso, dso_bid) if path.exists(dso_fname): print_disam(dso_fname, dso_vm_start, start_addr, stop_addr) else: print("Failed to find dso %s for address range [ 0x%x .. 0x%x ]" % (dso, start_addr, stop_addr)) print_srccode(comm, param_dict, sample, symbol, dso)
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/arm-cs-trace-disasm.py
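A standalone sketch of the packet-pairing logic described in the long comment above: the instruction range executed on a CPU spans from the previous sample's 'addr' to the current sample's 'ip', plus 4 so objdump includes the last instruction. The helper name and dict are illustrative, not part of the script.

cpu_last_addr = {}

def branch_range(cpu, ip, addr):
    # The previous packet's 'addr' is the range start; the current 'ip'
    # (+4) is the range end. The first packet per CPU only primes state.
    start = cpu_last_addr.get(cpu)
    cpu_last_addr[cpu] = addr
    if start is None:
        return None
    return (start, ip + 4)

print(branch_range(0, 0x1000, 0x2000))  # None: primes CPU 0
print(branch_range(0, 0x2040, 0x3000))  # (8192, 8260), i.e. (0x2000, 0x2044)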
# event_analyzing_sample.py: general event handler in python # SPDX-License-Identifier: GPL-2.0 # # Current perf report is already very powerful with the annotation integrated, # and this script is not trying to be as powerful as perf report, but # providing end user/developer a flexible way to analyze the events other # than trace points. # # The 2 database related functions in this script just show how to gather # the basic information, and users can modify and write their own functions # according to their specific requirement. # # The first function "show_general_events" just does a basic grouping for all # generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is # for a x86 HW PMU event: PEBS with load latency data. # from __future__ import print_function import os import sys import math import struct import sqlite3 sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from EventClass import * # # If the perf.data has a big number of samples, then the insert operation # will be very time consuming (about 10+ minutes for 10000 samples) if the # .db database is on disk. Move the .db file to RAM based FS to speedup # the handling, which will cut the time down to several seconds. # con = sqlite3.connect("/dev/shm/perf.db") con.isolation_level = None def trace_begin(): print("In trace_begin:\n") # # Will create several tables at the start, pebs_ll is for PEBS data with # load latency info, while gen_events is for general event. # con.execute(""" create table if not exists gen_events ( name text, symbol text, comm text, dso text );""") con.execute(""" create table if not exists pebs_ll ( name text, symbol text, comm text, dso text, flags integer, ip integer, status integer, dse integer, dla integer, lat integer );""") # # Create and insert event object to a database so that user could # do more analysis with simple database commands. # def process_event(param_dict): event_attr = param_dict["attr"] sample = param_dict["sample"] raw_buf = param_dict["raw_buf"] comm = param_dict["comm"] name = param_dict["ev_name"] # Symbol and dso info are not always resolved if ("dso" in param_dict): dso = param_dict["dso"] else: dso = "Unknown_dso" if ("symbol" in param_dict): symbol = param_dict["symbol"] else: symbol = "Unknown_symbol" # Create the event object and insert it to the right table in database event = create_event(name, comm, dso, symbol, raw_buf) insert_db(event) def insert_db(event): if event.ev_type == EVTYPE_GENERIC: con.execute("insert into gen_events values(?, ?, ?, ?)", (event.name, event.symbol, event.comm, event.dso)) elif event.ev_type == EVTYPE_PEBS_LL: event.ip &= 0x7fffffffffffffff event.dla &= 0x7fffffffffffffff con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (event.name, event.symbol, event.comm, event.dso, event.flags, event.ip, event.status, event.dse, event.dla, event.lat)) def trace_end(): print("In trace_end:\n") # We show the basic info for the 2 type of event classes show_general_events() show_pebs_ll() con.close() # # As the event number may be very big, so we can't use linear way # to show the histogram in real number, but use a log2 algorithm. 
# def num2sym(num): # Each number will have at least one '#' snum = '#' * (int)(math.log(num, 2) + 1) return snum def show_general_events(): # Check the total record number in the table count = con.execute("select count(*) from gen_events") for t in count: print("There is %d records in gen_events table" % t[0]) if t[0] == 0: return print("Statistics about the general events grouped by thread/symbol/dso: \n") # Group by thread commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)) for row in commq: print("%16s %8d %s" % (row[0], row[1], num2sym(row[1]))) # Group by symbol print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)) symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") for row in symbolq: print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) # Group by dso print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)) dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") for row in dsoq: print("%40s %8d %s" % (row[0], row[1], num2sym(row[1]))) # # This function just shows the basic info, and we could do more with the # data in the tables, like checking the function parameters when some # big latency events happen. # def show_pebs_ll(): count = con.execute("select count(*) from pebs_ll") for t in count: print("There is %d records in pebs_ll table" % t[0]) if t[0] == 0: return print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n") # Group by thread commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)) for row in commq: print("%16s %8d %s" % (row[0], row[1], num2sym(row[1]))) # Group by symbol print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)) symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") for row in symbolq: print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) # Group by dse dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)) for row in dseq: print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) # Group by latency latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat") print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)) for row in latq: print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) def trace_unhandled(event_name, context, event_fields_dict): print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/event_analyzing_sample.py
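The gen_events and pebs_ll tables left behind in /dev/shm/perf.db can be explored after the fact with plain sqlite3. A minimal sketch, not part of the kernel script: the table and column names are taken from the script above, while the comm value "ls" is purely illustrative.

#!/usr/bin/env python3
# Hypothetical follow-up query against the database created by
# event_analyzing_sample.py; assumes that script has already run.
import sqlite3

con = sqlite3.connect("/dev/shm/perf.db")

# Same per-symbol grouping the script performs, but filtered to one comm.
for symbol, cnt in con.execute(
        "select symbol, count(*) from gen_events "
        "where comm = ? group by symbol order by count(*) desc limit 10",
        ("ls",)):
    print("%32s %8d" % (symbol, cnt))

con.close()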
# Monitor the system for dropped packets and produce a report of drop locations and counts
# SPDX-License-Identifier: GPL-2.0

from __future__ import print_function

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

drop_log = {}
kallsyms = []

def get_kallsyms_table():
    global kallsyms

    try:
        f = open("/proc/kallsyms", "r")
    except:
        return

    for line in f:
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        kallsyms.append((loc, name))
    kallsyms.sort()

def get_sym(sloc):
    loc = int(sloc)

    # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
    #            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
    start, end = -1, len(kallsyms)
    while end != start + 1:
        pivot = (start + end) // 2
        if loc < kallsyms[pivot][0]:
            end = pivot
        else:
            start = pivot

    # Now (start == -1 or kallsyms[start][0] <= loc)
    # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
    if start >= 0:
        symloc, name = kallsyms[start]
        return (name, loc - symloc)
    else:
        return (None, 0)

def print_drop_table():
    print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym is None:
            sym = i
        print("%25s %25s %25s" % (sym, off, drop_log[i]))

def trace_begin():
    print("Starting trace (Ctrl-C to dump results)")

def trace_end():
    print("Gathering kallsyms data")
    get_kallsyms_table()
    print_drop_table()

# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, location, protocol):
    slocation = str(location)
    try:
        drop_log[slocation] = drop_log[slocation] + 1
    except:
        drop_log[slocation] = 1
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/net_dropmonitor.py
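The hand-rolled binary search in get_sym() finds the rightmost symbol whose start address is at or below the sample address. The same lookup can be expressed with the standard bisect module; a small self-contained sketch, with a made-up symbol table:

# Standalone sketch of the get_sym() lookup using bisect instead of a
# hand-written binary search. The addresses and names here are invented.
import bisect

kallsyms = [(0x1000, "func_a"), (0x1400, "func_b"), (0x2000, "func_c")]

def get_sym(loc):
    # Rightmost entry whose start address is <= loc.
    i = bisect.bisect_right([addr for addr, _ in kallsyms], loc) - 1
    if i < 0:
        return (None, 0)
    addr, name = kallsyms[i]
    return (name, loc - addr)

print(get_sym(0x1404))  # ('func_b', 4)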
# export-to-postgresql.py: export perf data to a postgresql database # Copyright (c) 2014, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. from __future__ import print_function import os import sys import struct import datetime # To use this script you will need to have installed package python-pyside which # provides LGPL-licensed Python bindings for Qt. You will also need the package # libqt4-sql-psql for Qt postgresql support. # # The script assumes postgresql is running on the local machine and that the # user has postgresql permissions to create databases. Examples of installing # postgresql and adding such a user are: # # fedora: # # $ sudo yum install postgresql postgresql-server qt-postgresql # $ sudo su - postgres -c initdb # $ sudo service postgresql start # $ sudo su - postgres # $ createuser -s <your user id here> # Older versions may not support -s, in which case answer the prompt below: # Shall the new role be a superuser? (y/n) y # $ sudo yum install python-pyside # # Alternately, to use Python3 and/or pyside 2, one of the following: # $ sudo yum install python3-pyside # $ pip install --user PySide2 # $ pip3 install --user PySide2 # # ubuntu: # # $ sudo apt-get install postgresql # $ sudo su - postgres # $ createuser -s <your user id here> # $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql # # Alternately, to use Python3 and/or pyside 2, one of the following: # # $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql # $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql # $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql # # An example of using this script with Intel PT: # # $ perf record -e intel_pt//u ls # $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls # 2015-05-29 12:49:23.464364 Creating database... # 2015-05-29 12:49:26.281717 Writing to intermediate files... # 2015-05-29 12:49:27.190383 Copying to database... # 2015-05-29 12:49:28.140451 Removing intermediate files... # 2015-05-29 12:49:28.147451 Adding primary keys # 2015-05-29 12:49:28.655683 Adding foreign keys # 2015-05-29 12:49:29.365350 Done # # To browse the database, psql can be used e.g. # # $ psql pt_example # pt_example=# select * from samples_view where id < 100; # pt_example=# \d+ # pt_example=# \d+ samples_view # pt_example=# \q # # An example of using the database is provided by the script # exported-sql-viewer.py. Refer to that script for details. # # Tables: # # The tables largely correspond to perf tools' data structures. They are largely self-explanatory. # # samples # # 'samples' is the main table. It represents what instruction was executing at a point in time # when something (a selected event) happened. The memory address is the instruction pointer or 'ip'. # # calls # # 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'. # 'calls' is only created when the 'calls' option to this script is specified. # # call_paths # # 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'. 
# 'calls_paths' is only created when the 'calls' option to this script is specified. # # branch_types # # 'branch_types' provides descriptions for each type of branch. # # comm_threads # # 'comm_threads' shows how 'comms' relates to 'threads'. # # comms # # 'comms' contains a record for each 'comm' - the name given to the executable that is running. # # dsos # # 'dsos' contains a record for each executable file or library. # # machines # # 'machines' can be used to distinguish virtual machines if virtualization is supported. # # selected_events # # 'selected_events' contains a record for each kind of event that has been sampled. # # symbols # # 'symbols' contains a record for each symbol. Only symbols that have samples are present. # # threads # # 'threads' contains a record for each thread. # # Views: # # Most of the tables have views for more friendly display. The views are: # # calls_view # call_paths_view # comm_threads_view # dsos_view # machines_view # samples_view # symbols_view # threads_view # # More examples of browsing the database with psql: # Note that some of the examples are not the most optimal SQL query. # Note that call information is only available if the script's 'calls' option has been used. # # Top 10 function calls (not aggregated by symbol): # # SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10; # # Top 10 function calls (aggregated by symbol): # # SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol, # SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count # FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10; # # Note that the branch count gives a rough estimation of cpu usage, so functions # that took a long time but have a relatively low branch count must have spent time # waiting. # # Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'): # # SELECT * FROM symbols_view WHERE name LIKE '%alloc%'; # # Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187): # # SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10; # # Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254): # # SELECT * FROM calls_view WHERE parent_call_path_id = 254; # # Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670) # # SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%'; # # Show transactions: # # SELECT * FROM samples_view WHERE event = 'transactions'; # # Note transaction start has 'in_tx' true whereas, transaction end has 'in_tx' false. # Transaction aborts have branch_type_name 'transaction abort' # # Show transaction aborts: # # SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort'; # # To print a call stack requires walking the call_paths table. 
For example this python script: # #!/usr/bin/python2 # # import sys # from PySide.QtSql import * # # if __name__ == '__main__': # if (len(sys.argv) < 3): # print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>" # raise Exception("Too few arguments") # dbname = sys.argv[1] # call_path_id = sys.argv[2] # db = QSqlDatabase.addDatabase('QPSQL') # db.setDatabaseName(dbname) # if not db.open(): # raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) # query = QSqlQuery(db) # print " id ip symbol_id symbol dso_id dso_short_name" # while call_path_id != 0 and call_path_id != 1: # ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id)) # if not ret: # raise Exception("Query failed: " + query.lastError().text()) # if not query.next(): # raise Exception("Query failed") # print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5)) # call_path_id = query.value(6) pyside_version_1 = True if not "pyside-version-1" in sys.argv: try: from PySide2.QtSql import * pyside_version_1 = False except: pass if pyside_version_1: from PySide.QtSql import * if sys.version_info < (3, 0): def toserverstr(str): return str def toclientstr(str): return str else: # Assume UTF-8 server_encoding and client_encoding def toserverstr(str): return bytes(str, "UTF_8") def toclientstr(str): return bytes(str, "UTF_8") # Need to access PostgreSQL C library directly to use COPY FROM STDIN from ctypes import * libpq = CDLL("libpq.so.5") PQconnectdb = libpq.PQconnectdb PQconnectdb.restype = c_void_p PQconnectdb.argtypes = [ c_char_p ] PQfinish = libpq.PQfinish PQfinish.argtypes = [ c_void_p ] PQstatus = libpq.PQstatus PQstatus.restype = c_int PQstatus.argtypes = [ c_void_p ] PQexec = libpq.PQexec PQexec.restype = c_void_p PQexec.argtypes = [ c_void_p, c_char_p ] PQresultStatus = libpq.PQresultStatus PQresultStatus.restype = c_int PQresultStatus.argtypes = [ c_void_p ] PQputCopyData = libpq.PQputCopyData PQputCopyData.restype = c_int PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ] PQputCopyEnd = libpq.PQputCopyEnd PQputCopyEnd.restype = c_int PQputCopyEnd.argtypes = [ c_void_p, c_void_p ] sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') # These perf imports are not used at present #from perf_trace_context import * #from Core import * perf_db_export_mode = True perf_db_export_calls = False perf_db_export_callchains = False def printerr(*args, **kw_args): print(*args, file=sys.stderr, **kw_args) def printdate(*args, **kw_args): print(datetime.datetime.today(), *args, sep=' ', **kw_args) def usage(): printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]"); printerr("where: columns 'all' or 'branches'"); printerr(" calls 'calls' => create calls and call_paths table"); printerr(" callchains 'callchains' => create call_paths table"); printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1"); raise Exception("Too few or bad arguments") if (len(sys.argv) < 2): usage() dbname = sys.argv[1] if (len(sys.argv) >= 3): columns = sys.argv[2] else: columns = "all" if columns not in ("all", "branches"): usage() branches = (columns == "branches") for i in range(3,len(sys.argv)): if (sys.argv[i] == "calls"): perf_db_export_calls = True elif (sys.argv[i] == "callchains"): perf_db_export_callchains = True elif (sys.argv[i] == 
"pyside-version-1"): pass else: usage() output_dir_name = os.getcwd() + "/" + dbname + "-perf-data" os.mkdir(output_dir_name) def do_query(q, s): if (q.exec_(s)): return raise Exception("Query failed: " + q.lastError().text()) printdate("Creating database...") db = QSqlDatabase.addDatabase('QPSQL') query = QSqlQuery(db) db.setDatabaseName('postgres') db.open() try: do_query(query, 'CREATE DATABASE ' + dbname) except: os.rmdir(output_dir_name) raise query.finish() query.clear() db.close() db.setDatabaseName(dbname) db.open() query = QSqlQuery(db) do_query(query, 'SET client_min_messages TO WARNING') do_query(query, 'CREATE TABLE selected_events (' 'id bigint NOT NULL,' 'name varchar(80))') do_query(query, 'CREATE TABLE machines (' 'id bigint NOT NULL,' 'pid integer,' 'root_dir varchar(4096))') do_query(query, 'CREATE TABLE threads (' 'id bigint NOT NULL,' 'machine_id bigint,' 'process_id bigint,' 'pid integer,' 'tid integer)') do_query(query, 'CREATE TABLE comms (' 'id bigint NOT NULL,' 'comm varchar(16),' 'c_thread_id bigint,' 'c_time bigint,' 'exec_flag boolean)') do_query(query, 'CREATE TABLE comm_threads (' 'id bigint NOT NULL,' 'comm_id bigint,' 'thread_id bigint)') do_query(query, 'CREATE TABLE dsos (' 'id bigint NOT NULL,' 'machine_id bigint,' 'short_name varchar(256),' 'long_name varchar(4096),' 'build_id varchar(64))') do_query(query, 'CREATE TABLE symbols (' 'id bigint NOT NULL,' 'dso_id bigint,' 'sym_start bigint,' 'sym_end bigint,' 'binding integer,' 'name varchar(2048))') do_query(query, 'CREATE TABLE branch_types (' 'id integer NOT NULL,' 'name varchar(80))') if branches: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'branch_type integer,' 'in_tx boolean,' 'call_path_id bigint,' 'insn_count bigint,' 'cyc_count bigint,' 'flags integer)') else: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'period bigint,' 'weight bigint,' 'transaction bigint,' 'data_src bigint,' 'branch_type integer,' 'in_tx boolean,' 'call_path_id bigint,' 'insn_count bigint,' 'cyc_count bigint,' 'flags integer)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE TABLE call_paths (' 'id bigint NOT NULL,' 'parent_id bigint,' 'symbol_id bigint,' 'ip bigint)') if perf_db_export_calls: do_query(query, 'CREATE TABLE calls (' 'id bigint NOT NULL,' 'thread_id bigint,' 'comm_id bigint,' 'call_path_id bigint,' 'call_time bigint,' 'return_time bigint,' 'branch_count bigint,' 'call_id bigint,' 'return_id bigint,' 'parent_call_path_id bigint,' 'flags integer,' 'parent_id bigint,' 'insn_count bigint,' 'cyc_count bigint)') do_query(query, 'CREATE TABLE ptwrite (' 'id bigint NOT NULL,' 'payload bigint,' 'exact_ip boolean)') do_query(query, 'CREATE TABLE cbr (' 'id bigint NOT NULL,' 'cbr integer,' 'mhz integer,' 'percent integer)') do_query(query, 'CREATE TABLE mwait (' 'id bigint NOT NULL,' 'hints integer,' 'extensions integer)') do_query(query, 'CREATE TABLE pwre (' 'id bigint NOT NULL,' 'cstate integer,' 'subcstate integer,' 'hw boolean)') 
do_query(query, 'CREATE TABLE exstop (' 'id bigint NOT NULL,' 'exact_ip boolean)') do_query(query, 'CREATE TABLE pwrx (' 'id bigint NOT NULL,' 'deepest_cstate integer,' 'last_cstate integer,' 'wake_reason integer)') do_query(query, 'CREATE TABLE context_switches (' 'id bigint NOT NULL,' 'machine_id bigint,' 'time bigint,' 'cpu integer,' 'thread_out_id bigint,' 'comm_out_id bigint,' 'thread_in_id bigint,' 'comm_in_id bigint,' 'flags integer)') do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' 'id,' 'pid,' 'root_dir,' 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest' ' FROM machines') do_query(query, 'CREATE VIEW dsos_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'short_name,' 'long_name,' 'build_id' ' FROM dsos') do_query(query, 'CREATE VIEW symbols_view AS ' 'SELECT ' 'id,' 'name,' '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,' 'dso_id,' 'sym_start,' 'sym_end,' 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding' ' FROM symbols') do_query(query, 'CREATE VIEW threads_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'process_id,' 'pid,' 'tid' ' FROM threads') do_query(query, 'CREATE VIEW comm_threads_view AS ' 'SELECT ' 'comm_id,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid' ' FROM comm_threads') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE VIEW call_paths_view AS ' 'SELECT ' 'c.id,' 'to_hex(c.ip) AS ip,' 'c.symbol_id,' '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,' '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,' '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,' 'c.parent_id,' 'to_hex(p.ip) AS parent_ip,' 'p.symbol_id AS parent_symbol_id,' '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,' '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,' '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') if perf_db_export_calls: do_query(query, 'CREATE VIEW calls_view AS ' 'SELECT ' 'calls.id,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'call_path_id,' 'to_hex(ip) AS ip,' 'symbol_id,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'call_time,' 'return_time,' 'return_time - call_time AS elapsed_time,' 'branch_count,' 'insn_count,' 'cyc_count,' 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,' 'call_id,' 'return_id,' 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,' 'parent_call_path_id,' 'calls.parent_id' ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') do_query(query, 'CREATE VIEW samples_view AS ' 'SELECT ' 'id,' 'time,' 'cpu,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = 
comm_id) AS command,' '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,' 'to_hex(ip) AS ip_hex,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'sym_offset,' '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,' 'to_hex(to_ip) AS to_ip_hex,' '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,' 'to_sym_offset,' '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' 'in_tx,' 'insn_count,' 'cyc_count,' 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,' 'flags' ' FROM samples') do_query(query, 'CREATE VIEW ptwrite_view AS ' 'SELECT ' 'ptwrite.id,' 'time,' 'cpu,' 'to_hex(payload) AS payload_hex,' 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' ' FROM ptwrite' ' INNER JOIN samples ON samples.id = ptwrite.id') do_query(query, 'CREATE VIEW cbr_view AS ' 'SELECT ' 'cbr.id,' 'time,' 'cpu,' 'cbr,' 'mhz,' 'percent' ' FROM cbr' ' INNER JOIN samples ON samples.id = cbr.id') do_query(query, 'CREATE VIEW mwait_view AS ' 'SELECT ' 'mwait.id,' 'time,' 'cpu,' 'to_hex(hints) AS hints_hex,' 'to_hex(extensions) AS extensions_hex' ' FROM mwait' ' INNER JOIN samples ON samples.id = mwait.id') do_query(query, 'CREATE VIEW pwre_view AS ' 'SELECT ' 'pwre.id,' 'time,' 'cpu,' 'cstate,' 'subcstate,' 'CASE WHEN hw=FALSE THEN \'False\' ELSE \'True\' END AS hw' ' FROM pwre' ' INNER JOIN samples ON samples.id = pwre.id') do_query(query, 'CREATE VIEW exstop_view AS ' 'SELECT ' 'exstop.id,' 'time,' 'cpu,' 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' ' FROM exstop' ' INNER JOIN samples ON samples.id = exstop.id') do_query(query, 'CREATE VIEW pwrx_view AS ' 'SELECT ' 'pwrx.id,' 'time,' 'cpu,' 'deepest_cstate,' 'last_cstate,' 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' ' WHEN wake_reason=2 THEN \'Timer Deadline\'' ' WHEN wake_reason=4 THEN \'Monitored Address\'' ' WHEN wake_reason=8 THEN \'HW\'' ' ELSE CAST ( wake_reason AS VARCHAR(2) )' 'END AS wake_reason' ' FROM pwrx' ' INNER JOIN samples ON samples.id = pwrx.id') do_query(query, 'CREATE VIEW power_events_view AS ' 'SELECT ' 'samples.id,' 'samples.time,' 'samples.cpu,' 'selected_events.name AS event,' 'FORMAT(\'%6s\', cbr.cbr) AS cbr,' 'FORMAT(\'%6s\', cbr.mhz) AS MHz,' 'FORMAT(\'%5s\', cbr.percent) AS percent,' 'to_hex(mwait.hints) AS hints_hex,' 'to_hex(mwait.extensions) AS extensions_hex,' 'FORMAT(\'%3s\', pwre.cstate) AS cstate,' 'FORMAT(\'%3s\', pwre.subcstate) AS subcstate,' 'CASE WHEN pwre.hw=FALSE THEN \'False\' WHEN pwre.hw=TRUE THEN \'True\' ELSE NULL END AS hw,' 'CASE WHEN exstop.exact_ip=FALSE THEN \'False\' WHEN exstop.exact_ip=TRUE THEN \'True\' ELSE NULL END AS exact_ip,' 'FORMAT(\'%3s\', pwrx.deepest_cstate) AS deepest_cstate,' 'FORMAT(\'%3s\', pwrx.last_cstate) AS last_cstate,' 'CASE WHEN pwrx.wake_reason=1 THEN \'Interrupt\'' ' WHEN pwrx.wake_reason=2 THEN \'Timer Deadline\'' ' WHEN pwrx.wake_reason=4 THEN \'Monitored Address\'' ' WHEN pwrx.wake_reason=8 THEN \'HW\'' ' ELSE FORMAT(\'%2s\', pwrx.wake_reason)' 'END AS wake_reason' ' FROM cbr' ' FULL JOIN mwait ON mwait.id = cbr.id' ' FULL JOIN pwre ON pwre.id = cbr.id' ' FULL JOIN exstop ON exstop.id = cbr.id' ' FULL JOIN pwrx ON pwrx.id = cbr.id' ' INNER JOIN samples ON samples.id = coalesce(cbr.id, mwait.id, pwre.id, exstop.id, pwrx.id)' ' INNER JOIN selected_events ON selected_events.id = samples.evsel_id' ' ORDER BY 
samples.id') do_query(query, 'CREATE VIEW context_switches_view AS ' 'SELECT ' 'context_switches.id,' 'context_switches.machine_id,' 'context_switches.time,' 'context_switches.cpu,' 'th_out.pid AS pid_out,' 'th_out.tid AS tid_out,' 'comm_out.comm AS comm_out,' 'th_in.pid AS pid_in,' 'th_in.tid AS tid_in,' 'comm_in.comm AS comm_in,' 'CASE WHEN context_switches.flags = 0 THEN \'in\'' ' WHEN context_switches.flags = 1 THEN \'out\'' ' WHEN context_switches.flags = 3 THEN \'out preempt\'' ' ELSE CAST ( context_switches.flags AS VARCHAR(11) )' 'END AS flags' ' FROM context_switches' ' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id' ' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id' ' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id' ' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id') file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0) file_trailer = b"\377\377" def open_output_file(file_name): path_name = output_dir_name + "/" + file_name file = open(path_name, "wb+") file.write(file_header) return file def close_output_file(file): file.write(file_trailer) file.close() def copy_output_file_direct(file, table_name): close_output_file(file) sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')" do_query(query, sql) # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly def copy_output_file(file, table_name): conn = PQconnectdb(toclientstr("dbname = " + dbname)) if (PQstatus(conn)): raise Exception("COPY FROM STDIN PQconnectdb failed") file.write(file_trailer) file.seek(0) sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')" res = PQexec(conn, toclientstr(sql)) if (PQresultStatus(res) != 4): raise Exception("COPY FROM STDIN PQexec failed") data = file.read(65536) while (len(data)): ret = PQputCopyData(conn, data, len(data)) if (ret != 1): raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret)) data = file.read(65536) ret = PQputCopyEnd(conn, None) if (ret != 1): raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret)) PQfinish(conn) def remove_output_file(file): name = file.name file.close() os.unlink(name) evsel_file = open_output_file("evsel_table.bin") machine_file = open_output_file("machine_table.bin") thread_file = open_output_file("thread_table.bin") comm_file = open_output_file("comm_table.bin") comm_thread_file = open_output_file("comm_thread_table.bin") dso_file = open_output_file("dso_table.bin") symbol_file = open_output_file("symbol_table.bin") branch_type_file = open_output_file("branch_type_table.bin") sample_file = open_output_file("sample_table.bin") if perf_db_export_calls or perf_db_export_callchains: call_path_file = open_output_file("call_path_table.bin") if perf_db_export_calls: call_file = open_output_file("call_table.bin") ptwrite_file = open_output_file("ptwrite_table.bin") cbr_file = open_output_file("cbr_table.bin") mwait_file = open_output_file("mwait_table.bin") pwre_file = open_output_file("pwre_table.bin") exstop_file = open_output_file("exstop_table.bin") pwrx_file = open_output_file("pwrx_table.bin") context_switches_file = open_output_file("context_switches_table.bin") def trace_begin(): printdate("Writing to intermediate files...") # id == 0 means unknown. 
It is easier to create records for them than replace the zeroes with NULLs evsel_table(0, "unknown") machine_table(0, 0, "unknown") thread_table(0, 0, 0, -1, -1) comm_table(0, "unknown", 0, 0, 0) dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) if perf_db_export_calls or perf_db_export_callchains: call_path_table(0, 0, 0, 0) call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) unhandled_count = 0 def is_table_empty(table_name): do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1'); if query.next(): return False return True def drop(table_name): do_query(query, 'DROP VIEW ' + table_name + '_view'); do_query(query, 'DROP TABLE ' + table_name); def trace_end(): printdate("Copying to database...") copy_output_file(evsel_file, "selected_events") copy_output_file(machine_file, "machines") copy_output_file(thread_file, "threads") copy_output_file(comm_file, "comms") copy_output_file(comm_thread_file, "comm_threads") copy_output_file(dso_file, "dsos") copy_output_file(symbol_file, "symbols") copy_output_file(branch_type_file, "branch_types") copy_output_file(sample_file, "samples") if perf_db_export_calls or perf_db_export_callchains: copy_output_file(call_path_file, "call_paths") if perf_db_export_calls: copy_output_file(call_file, "calls") copy_output_file(ptwrite_file, "ptwrite") copy_output_file(cbr_file, "cbr") copy_output_file(mwait_file, "mwait") copy_output_file(pwre_file, "pwre") copy_output_file(exstop_file, "exstop") copy_output_file(pwrx_file, "pwrx") copy_output_file(context_switches_file, "context_switches") printdate("Removing intermediate files...") remove_output_file(evsel_file) remove_output_file(machine_file) remove_output_file(thread_file) remove_output_file(comm_file) remove_output_file(comm_thread_file) remove_output_file(dso_file) remove_output_file(symbol_file) remove_output_file(branch_type_file) remove_output_file(sample_file) if perf_db_export_calls or perf_db_export_callchains: remove_output_file(call_path_file) if perf_db_export_calls: remove_output_file(call_file) remove_output_file(ptwrite_file) remove_output_file(cbr_file) remove_output_file(mwait_file) remove_output_file(pwre_file) remove_output_file(exstop_file) remove_output_file(pwrx_file) remove_output_file(context_switches_file) os.rmdir(output_dir_name) printdate("Adding primary keys") do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE ptwrite ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE cbr ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE mwait ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)') 
do_query(query, 'ALTER TABLE context_switches ADD PRIMARY KEY (id)') printdate("Adding foreign keys") do_query(query, 'ALTER TABLE threads ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE comms ' 'ADD CONSTRAINT threadfk FOREIGN KEY (c_thread_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE comm_threads ' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE dsos ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)') do_query(query, 'ALTER TABLE symbols ' 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)') do_query(query, 'ALTER TABLE samples ' 'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),' 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),' 'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),' 'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'ALTER TABLE call_paths ' 'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),' 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE calls ' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),' 'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),' 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),' 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') do_query(query, 'ALTER TABLE comms ADD has_calls boolean') do_query(query, 'UPDATE comms SET has_calls = TRUE WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)') do_query(query, 'ALTER TABLE ptwrite ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE cbr ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE mwait ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE pwre ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE exstop ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE pwrx ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE context_switches ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 'ADD CONSTRAINT toutfk FOREIGN KEY (thread_out_id) REFERENCES threads (id),' 'ADD CONSTRAINT tinfk FOREIGN KEY (thread_in_id) REFERENCES threads (id),' 'ADD CONSTRAINT coutfk FOREIGN KEY (comm_out_id) REFERENCES comms (id),' 'ADD CONSTRAINT cinfk FOREIGN KEY (comm_in_id) REFERENCES comms 
(id)') printdate("Dropping unused tables") if is_table_empty("ptwrite"): drop("ptwrite") if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): do_query(query, 'DROP VIEW power_events_view'); drop("mwait") drop("pwre") drop("exstop") drop("pwrx") if is_table_empty("cbr"): drop("cbr") if is_table_empty("context_switches"): drop("context_switches") if (unhandled_count): printdate("Warning: ", unhandled_count, " unhandled events") printdate("Done") def trace_unhandled(event_name, context, event_fields_dict): global unhandled_count unhandled_count += 1 def sched__sched_switch(*x): pass def evsel_table(evsel_id, evsel_name, *x): evsel_name = toserverstr(evsel_name) n = len(evsel_name) fmt = "!hiqi" + str(n) + "s" value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name) evsel_file.write(value) def machine_table(machine_id, pid, root_dir, *x): root_dir = toserverstr(root_dir) n = len(root_dir) fmt = "!hiqiii" + str(n) + "s" value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir) machine_file.write(value) def thread_table(thread_id, machine_id, process_id, pid, tid, *x): value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid) thread_file.write(value) def comm_table(comm_id, comm_str, thread_id, time, exec_flag, *x): comm_str = toserverstr(comm_str) n = len(comm_str) fmt = "!hiqi" + str(n) + "s" + "iqiqiB" value = struct.pack(fmt, 5, 8, comm_id, n, comm_str, 8, thread_id, 8, time, 1, exec_flag) comm_file.write(value) def comm_thread_table(comm_thread_id, comm_id, thread_id, *x): fmt = "!hiqiqiq" value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id) comm_thread_file.write(value) def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): short_name = toserverstr(short_name) long_name = toserverstr(long_name) build_id = toserverstr(build_id) n1 = len(short_name) n2 = len(long_name) n3 = len(build_id) fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s" value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id) dso_file.write(value) def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x): symbol_name = toserverstr(symbol_name) n = len(symbol_name) fmt = "!hiqiqiqiqiii" + str(n) + "s" value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name) symbol_file.write(value) def branch_type_table(branch_type, name, *x): name = toserverstr(name) n = len(name) fmt = "!hiii" + str(n) + "s" value = struct.pack(fmt, 2, 4, branch_type, n, name) branch_type_file.write(value) def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, flags, *x): if branches: value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiqii", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt, 4, flags) else: value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiqii", 25, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, 
period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt, 4, flags) sample_file.write(value) def call_path_table(cp_id, parent_id, symbol_id, ip, *x): fmt = "!hiqiqiqiq" value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) call_path_file.write(value) def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x): fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq" value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt) call_file.write(value) def ptwrite(id, raw_buf): data = struct.unpack_from("<IQ", raw_buf) flags = data[0] payload = data[1] exact_ip = flags & 1 value = struct.pack("!hiqiqiB", 3, 8, id, 8, payload, 1, exact_ip) ptwrite_file.write(value) def cbr(id, raw_buf): data = struct.unpack_from("<BBBBII", raw_buf) cbr = data[0] MHz = (data[4] + 500) / 1000 percent = ((cbr * 1000 / data[2]) + 5) / 10 value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent)) cbr_file.write(value) def mwait(id, raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] hints = payload & 0xff extensions = (payload >> 32) & 0x3 value = struct.pack("!hiqiiii", 3, 8, id, 4, hints, 4, extensions) mwait_file.write(value) def pwre(id, raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] hw = (payload >> 7) & 1 cstate = (payload >> 12) & 0xf subcstate = (payload >> 8) & 0xf value = struct.pack("!hiqiiiiiB", 4, 8, id, 4, cstate, 4, subcstate, 1, hw) pwre_file.write(value) def exstop(id, raw_buf): data = struct.unpack_from("<I", raw_buf) flags = data[0] exact_ip = flags & 1 value = struct.pack("!hiqiB", 2, 8, id, 1, exact_ip) exstop_file.write(value) def pwrx(id, raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] deepest_cstate = payload & 0xf last_cstate = (payload >> 4) & 0xf wake_reason = (payload >> 8) & 0xf value = struct.pack("!hiqiiiiii", 4, 8, id, 4, deepest_cstate, 4, last_cstate, 4, wake_reason) pwrx_file.write(value) def synth_data(id, config, raw_buf, *x): if config == 0: ptwrite(id, raw_buf) elif config == 1: mwait(id, raw_buf) elif config == 2: pwre(id, raw_buf) elif config == 3: exstop(id, raw_buf) elif config == 4: pwrx(id, raw_buf) elif config == 5: cbr(id, raw_buf) def context_switch_table(id, machine_id, time, cpu, thread_out_id, comm_out_id, thread_in_id, comm_in_id, flags, *x): fmt = "!hiqiqiqiiiqiqiqiqii" value = struct.pack(fmt, 9, 8, id, 8, machine_id, 8, time, 4, cpu, 8, thread_out_id, 8, comm_out_id, 8, thread_in_id, 8, comm_in_id, 4, flags) context_switches_file.write(value)
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/export-to-postgresql.py
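The intermediate files written by the *_table() functions above use PostgreSQL's COPY binary format: a fixed 19-byte header, then per row a big-endian int16 field count followed by, for each field, an int32 byte length and the raw value, and finally a 2-byte trailer. A toy sketch of encoding one two-field row, reusing the header and trailer constants from the script; the row content is invented and nothing here touches a database:

# Toy illustration of the PGCOPY binary row encoding used by the
# *_table() functions (compare evsel_table()'s "!hiqi<n>s" pack format).
import struct

file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = b"\377\377"

def pack_row_id_name(row_id, name):
    name = bytes(name, "UTF_8")
    n = len(name)
    # 2 fields: an int64 id (length 8), then an n-byte string (length n)
    return struct.pack("!hiqi" + str(n) + "s", 2, 8, row_id, n, name)

blob = file_header + pack_row_id_name(1, "cycles") + file_trailer
print(len(blob), "bytes")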
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.

from __future__ import print_function

import os, sys, time

try:
    import thread
except ImportError:
    import _thread as thread

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n"

for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

syscalls = autodict()

def trace_begin():
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass

def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs,
                            common_nsecs, common_pid, common_comm,
                            common_callchain, id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu, common_secs,
                        common_nsecs, common_pid, common_comm, id, args):
    raw_syscalls__sys_enter(**locals())

def print_syscall_totals(interval):
    while 1:
        clear_term()
        if for_comm is not None:
            print("\nsyscall events for %s:\n" % (for_comm))
        else:
            print("\nsyscall events:\n")

        print("%-40s %10s" % ("event", "count"))
        print("%-40s %10s" %
              ("----------------------------------------", "----------"))

        for id, val in sorted(syscalls.items(),
                              key = lambda kv: (kv[1], kv[0]), reverse = True):
            try:
                print("%-40s %10d" % (syscall_name(id), val))
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/sctop.py
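sctop's display loop runs on a separate thread started from trace_begin() while the perf callbacks keep mutating the shared dict; the read/clear race is benign for a top-style display. The same pattern in plain Python, as a hedged sketch: threading replaces the legacy thread module, and the counts are fabricated rather than coming from perf events.

# Minimal standalone version of sctop's refresh-thread pattern.
import threading
import time
import random

counts = {}

def refresher(interval):
    while True:
        time.sleep(interval)
        # Snapshot, print the top entries, then reset, as sctop does.
        top = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:5]
        print(top)
        counts.clear()

threading.Thread(target=refresher, args=(1,), daemon=True).start()

for _ in range(20000):  # stand-in for the perf event callbacks
    sid = random.randrange(16)
    counts[sid] = counts.get(sid, 0) + 1
    time.sleep(0.0001)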
# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (C) 2018 Ravi Bangoria, IBM Corporation
#
# Hypervisor call statistics

from __future__ import print_function

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

# output: {
#   opcode: {
#     'min': minimum time nsec
#     'max': maximum time nsec
#     'time': total time nsec (avg = time / cnt)
#     'cnt': counter
#   } ...
# }
output = {}

# d_enter: {
#   cpu: {
#     opcode: nsec
#   } ...
# }
d_enter = {}

hcall_table = {
    4: 'H_REMOVE', 8: 'H_ENTER', 12: 'H_READ', 16: 'H_CLEAR_MOD',
    20: 'H_CLEAR_REF', 24: 'H_PROTECT', 28: 'H_GET_TCE', 32: 'H_PUT_TCE',
    36: 'H_SET_SPRG0', 40: 'H_SET_DABR', 44: 'H_PAGE_INIT', 48: 'H_SET_ASR',
    52: 'H_ASR_ON', 56: 'H_ASR_OFF', 60: 'H_LOGICAL_CI_LOAD',
    64: 'H_LOGICAL_CI_STORE', 68: 'H_LOGICAL_CACHE_LOAD',
    72: 'H_LOGICAL_CACHE_STORE', 76: 'H_LOGICAL_ICBI', 80: 'H_LOGICAL_DCBF',
    84: 'H_GET_TERM_CHAR', 88: 'H_PUT_TERM_CHAR', 92: 'H_REAL_TO_LOGICAL',
    96: 'H_HYPERVISOR_DATA', 100: 'H_EOI', 104: 'H_CPPR', 108: 'H_IPI',
    112: 'H_IPOLL', 116: 'H_XIRR', 120: 'H_MIGRATE_DMA', 124: 'H_PERFMON',
    220: 'H_REGISTER_VPA', 224: 'H_CEDE', 228: 'H_CONFER', 232: 'H_PROD',
    236: 'H_GET_PPP', 240: 'H_SET_PPP', 244: 'H_PURR', 248: 'H_PIC',
    252: 'H_REG_CRQ', 256: 'H_FREE_CRQ', 260: 'H_VIO_SIGNAL',
    264: 'H_SEND_CRQ', 272: 'H_COPY_RDMA', 276: 'H_REGISTER_LOGICAL_LAN',
    280: 'H_FREE_LOGICAL_LAN', 284: 'H_ADD_LOGICAL_LAN_BUFFER',
    288: 'H_SEND_LOGICAL_LAN', 292: 'H_BULK_REMOVE', 304: 'H_MULTICAST_CTRL',
    308: 'H_SET_XDABR', 312: 'H_STUFF_TCE', 316: 'H_PUT_TCE_INDIRECT',
    332: 'H_CHANGE_LOGICAL_LAN_MAC', 336: 'H_VTERM_PARTNER_INFO',
    340: 'H_REGISTER_VTERM', 344: 'H_FREE_VTERM', 348: 'H_RESET_EVENTS',
    352: 'H_ALLOC_RESOURCE', 356: 'H_FREE_RESOURCE', 360: 'H_MODIFY_QP',
    364: 'H_QUERY_QP', 368: 'H_REREGISTER_PMR', 372: 'H_REGISTER_SMR',
    376: 'H_QUERY_MR', 380: 'H_QUERY_MW', 384: 'H_QUERY_HCA',
    388: 'H_QUERY_PORT', 392: 'H_MODIFY_PORT', 396: 'H_DEFINE_AQP1',
    400: 'H_GET_TRACE_BUFFER', 404: 'H_DEFINE_AQP0', 408: 'H_RESIZE_MR',
    412: 'H_ATTACH_MCQP', 416: 'H_DETACH_MCQP', 420: 'H_CREATE_RPT',
    424: 'H_REMOVE_RPT', 428: 'H_REGISTER_RPAGES', 432: 'H_DISABLE_AND_GETC',
    436: 'H_ERROR_DATA', 440: 'H_GET_HCA_INFO', 444: 'H_GET_PERF_COUNT',
    448: 'H_MANAGE_TRACE', 468: 'H_FREE_LOGICAL_LAN_BUFFER',
    472: 'H_POLL_PENDING', 484: 'H_QUERY_INT_STATE',
    580: 'H_ILLAN_ATTRIBUTES', 592: 'H_MODIFY_HEA_QP', 596: 'H_QUERY_HEA_QP',
    600: 'H_QUERY_HEA', 604: 'H_QUERY_HEA_PORT', 608: 'H_MODIFY_HEA_PORT',
    612: 'H_REG_BCMC', 616: 'H_DEREG_BCMC', 620: 'H_REGISTER_HEA_RPAGES',
    624: 'H_DISABLE_AND_GET_HEA', 628: 'H_GET_HEA_INFO',
    632: 'H_ALLOC_HEA_RESOURCE', 644: 'H_ADD_CONN', 648: 'H_DEL_CONN',
    664: 'H_JOIN', 676: 'H_VASI_STATE', 688: 'H_ENABLE_CRQ',
    696: 'H_GET_EM_PARMS', 720: 'H_SET_MPP', 724: 'H_GET_MPP',
    748: 'H_HOME_NODE_ASSOCIATIVITY', 756: 'H_BEST_ENERGY', 764: 'H_XIRR_X',
    768: 'H_RANDOM', 772: 'H_COP', 788: 'H_GET_MPP_X', 796: 'H_SET_MODE',
    61440: 'H_RTAS',
}

def hcall_table_lookup(opcode):
    if (opcode in hcall_table):
        return hcall_table[opcode]
    else:
        return opcode

print_ptrn = '%-28s%10s%10s%10s%10s'

def trace_end():
    print(print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)'))
    print('-' * 68)
    for opcode in output:
        h_name = hcall_table_lookup(opcode)
        time = output[opcode]['time']
        cnt = output[opcode]['cnt']
        min_t = output[opcode]['min']
        max_t = output[opcode]['max']

        print(print_ptrn % (h_name, cnt, min_t, max_t, time // cnt))

def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
                        opcode, retval):
    if (cpu in d_enter and opcode in d_enter[cpu]):
        diff = nsecs(sec, nsec) - d_enter[cpu][opcode]

        if (opcode in output):
            output[opcode]['time'] += diff
            output[opcode]['cnt'] += 1
            if (output[opcode]['min'] > diff):
                output[opcode]['min'] = diff
            if (output[opcode]['max'] < diff):
                output[opcode]['max'] = diff
        else:
            output[opcode] = {
                'time': diff,
                'cnt': 1,
                'min': diff,
                'max': diff,
            }

        del d_enter[cpu][opcode]
#    else:
#        print("Can't find matching hcall_enter event. Ignoring sample")

def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
                         callchain, opcode):
    if (cpu in d_enter):
        d_enter[cpu][opcode] = nsecs(sec, nsec)
    else:
        d_enter[cpu] = {opcode: nsecs(sec, nsec)}
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/powerpc-hcalls.py
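Once the enter/exit timestamps are paired per CPU and opcode, the script reduces to a per-key running min/max/total aggregate. A compact standalone rehearsal of that bookkeeping; the opcode/latency pairs below are invented, not measured:

# Standalone sketch of the aggregation done in powerpc__hcall_exit().
stats = {}

def record(opcode, diff):
    if opcode in stats:
        s = stats[opcode]
        s['time'] += diff          # running total; averaged at print time
        s['cnt'] += 1
        s['min'] = min(s['min'], diff)
        s['max'] = max(s['max'], diff)
    else:
        stats[opcode] = {'time': diff, 'cnt': 1, 'min': diff, 'max': diff}

for opcode, diff in [(224, 1500), (224, 900), (4, 310)]:
    record(opcode, diff)

for opcode, s in stats.items():
    print(opcode, s['cnt'], s['min'], s['max'], s['time'] // s['cnt'])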
# mem-phys-addr.py: Resolve physical address samples
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2018, Intel Corporation.

from __future__ import division
from __future__ import print_function

import os
import sys
import struct
import re
import bisect
import collections

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

# physical address ranges for System RAM
system_ram = []
# physical address ranges for Persistent Memory
pmem = []
# file object for proc iomem
f = None
# count for each type of memory
load_mem_type_cnt = collections.Counter()
# perf event name
event_name = None

def parse_iomem():
    global f
    f = open('/proc/iomem', 'r')
    for i, j in enumerate(f):
        m = re.split('-|:', j, 2)
        if m[2].strip() == 'System RAM':
            system_ram.append(int(m[0], 16))
            system_ram.append(int(m[1], 16))
        if m[2].strip() == 'Persistent Memory':
            pmem.append(int(m[0], 16))
            pmem.append(int(m[1], 16))

def print_memory_type():
    print("Event: %s" % (event_name))
    print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
    print("%-40s %10s %10s\n" %
          ("----------------------------------------",
           "-----------", "-----------"), end='')
    total = sum(load_mem_type_cnt.values())
    for mem_type, count in sorted(load_mem_type_cnt.most_common(),
                                  key = lambda kv: (kv[1], kv[0]),
                                  reverse = True):
        print("%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
              end='')

def trace_begin():
    parse_iomem()

def trace_end():
    print_memory_type()
    f.close()

def is_system_ram(phys_addr):
    # /proc/iomem is sorted
    position = bisect.bisect(system_ram, phys_addr)
    if position % 2 == 0:
        return False
    return True

def is_persistent_mem(phys_addr):
    position = bisect.bisect(pmem, phys_addr)
    if position % 2 == 0:
        return False
    return True

def find_memory_type(phys_addr):
    if phys_addr == 0:
        return "N/A"
    if is_system_ram(phys_addr):
        return "System RAM"
    if is_persistent_mem(phys_addr):
        return "Persistent Memory"

    # slow path, search all
    f.seek(0, 0)
    for j in f:
        m = re.split('-|:', j, 2)
        if int(m[0], 16) <= phys_addr <= int(m[1], 16):
            return m[2]
    return "N/A"

def process_event(param_dict):
    name = param_dict["ev_name"]
    sample = param_dict["sample"]
    phys_addr = sample["phys_addr"]

    global event_name
    if event_name is None:
        event_name = name
    load_mem_type_cnt[find_memory_type(phys_addr)] += 1
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/mem-phys-addr.py
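The parity trick in is_system_ram() works because the ranges are stored as one flat, sorted list of alternating start/end addresses, so an address lies inside some range exactly when bisect lands at an odd index. A self-contained demonstration on a made-up range list:

# Toy demonstration of the flat start/end list plus bisect parity test
# used by is_system_ram(); the address ranges are invented.
import bisect

ranges = [0x1000, 0x1fff, 0x4000, 0x7fff]  # [start, end, start, end, ...]

def inside(addr):
    return bisect.bisect(ranges, addr) % 2 == 1

for addr in (0x0fff, 0x1234, 0x3000, 0x4000, 0x8000):
    print(hex(addr), inside(addr))
# Only 0x1234 and 0x4000 fall inside a range and print True.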
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# libxed.py: Python wrapper for libxed.so
# Copyright (c) 2014-2021, Intel Corporation.
#
# To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
#
#   git clone https://github.com/intelxed/mbuild.git mbuild
#   git clone https://github.com/intelxed/xed
#   cd xed
#   ./mfile.py --share
#   sudo ./mfile.py --prefix=/usr/local install
#   sudo ldconfig
#

import sys

from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
                   c_void_p, c_bool, c_byte, c_char, c_int, c_uint, \
                   c_longlong, c_ulonglong

# XED Disassembler

class xed_state_t(Structure):

    _fields_ = [
        ("mode", c_int),
        ("width", c_int)
    ]

class XEDInstruction():

    def __init__(self, libxed):
        # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
        xedd_t = c_byte * 512
        self.xedd = xedd_t()
        self.xedp = addressof(self.xedd)
        libxed.xed_decoded_inst_zero(self.xedp)
        self.state = xed_state_t()
        self.statep = addressof(self.state)
        # Buffer for disassembled instruction text
        self.buffer = create_string_buffer(256)
        self.bufferp = addressof(self.buffer)

class LibXED():

    def __init__(self):
        try:
            self.libxed = CDLL("libxed.so")
        except:
            self.libxed = None
        if not self.libxed:
            self.libxed = CDLL("/usr/local/lib/libxed.so")

        self.xed_tables_init = self.libxed.xed_tables_init
        self.xed_tables_init.restype = None
        self.xed_tables_init.argtypes = []

        self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
        self.xed_decoded_inst_zero.restype = None
        self.xed_decoded_inst_zero.argtypes = [ c_void_p ]

        self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
        self.xed_operand_values_set_mode.restype = None
        self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]

        self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
        self.xed_decoded_inst_zero_keep_mode.restype = None
        self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]

        self.xed_decode = self.libxed.xed_decode
        self.xed_decode.restype = c_int
        self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]

        self.xed_format_context = self.libxed.xed_format_context
        self.xed_format_context.restype = c_uint
        self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int,
                                             c_ulonglong, c_void_p, c_void_p ]

        self.xed_tables_init()

    def Instruction(self):
        return XEDInstruction(self)

    def SetMode(self, inst, mode):
        if mode:
            inst.state.mode = 4 # 32-bit
            inst.state.width = 4 # 4 bytes
        else:
            inst.state.mode = 1 # 64-bit
            inst.state.width = 8 # 8 bytes
        self.xed_operand_values_set_mode(inst.xedp, inst.statep)

    def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
        self.xed_decoded_inst_zero_keep_mode(inst.xedp)
        err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
        if err:
            return 0, ""
        # Use AT&T mode (2), alternative is Intel (3)
        ok = self.xed_format_context(2, inst.xedp, inst.bufferp,
                                     sizeof(inst.buffer), ip, 0, 0)
        if not ok:
            return 0, ""
        if sys.version_info[0] == 2:
            result = inst.buffer.value
        else:
            result = inst.buffer.value.decode()
        # Return instruction length and the disassembled instruction text
        # For now, assume the length is in byte 166
        return inst.xedd[166], result
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/libxed.py
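Assuming libxed.so has been installed as described in the header comment, the wrapper can be driven like this. A hedged sketch, not part of the kernel tree: the instruction bytes are a hand-picked x86-64 example (push %rbp; mov %rsp,%rbp) and the ip value is arbitrary.

# Illustrative use of the LibXED wrapper above; requires libxed.so.
from ctypes import create_string_buffer, addressof
from libxed import LibXED

libxed = LibXED()
inst = libxed.Instruction()
libxed.SetMode(inst, 0)  # per SetMode(), a false mode selects 64-bit

code = create_string_buffer(b"\x55\x48\x89\xe5")  # push %rbp; mov %rsp,%rbp
length, text = libxed.DisassembleOne(inst, addressof(code), 4, 0x1000)
print(length, text)  # expect length 1 and something like "pushq %rbp"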
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

from __future__ import print_function

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import syscall_name

usage = "perf script -s syscall-counts.py [comm]\n"

for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print("Press control+C to stop and show the summary")

def trace_end():
    print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs,
                            common_nsecs, common_pid, common_comm,
                            common_callchain, id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu, common_secs,
                        common_nsecs, common_pid, common_comm, id, args):
    raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
    if for_comm is not None:
        print("\nsyscall events for %s:\n" % (for_comm))
    else:
        print("\nsyscall events:\n")

    print("%-40s %10s" % ("event", "count"))
    print("%-40s %10s" %
          ("----------------------------------------", "-----------"))

    for id, val in sorted(syscalls.items(),
                          key = lambda kv: (kv[1], kv[0]), reverse = True):
        print("%-40s %10d" % (syscall_name(id), val))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/syscall-counts.py
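The summary sort orders by count first and uses the syscall id as a descending tiebreaker; the two-element tuple key does all the work. A tiny rehearsal on fabricated counts:

# The (count, id) tuple sort used by print_syscall_totals(), on fake data.
syscalls = {0: 42, 1: 42, 60: 7, 231: 99}

for sid, val in sorted(syscalls.items(),
                       key=lambda kv: (kv[1], kv[0]), reverse=True):
    print("%-10d %10d" % (sid, val))
# Prints 231 first (count 99), then id 1 before id 0 among the ties at 42.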
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention

from __future__ import print_function

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] +
                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *

thread_thislock = {}
thread_blocktime = {}
lock_waits = {}     # long-lived stats on (tid, lock) blockage elapsed time
process_names = {}  # long-lived pid-to-execname mapping

def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return  # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)

def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
                             nr, ret):
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]

def trace_begin():
    print("Press control+C to stop and show the summary")

def trace_end():
    for (tid, lock) in lock_waits:
        min, max, avg, count = lock_waits[tid, lock]
        print("%s[%d] lock %x contended %d times, %d avg ns "
              "[max: %d ns, min %d ns]" %
              (process_names[tid], tid, lock, count, avg, max, min))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/futex-contention.py
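Only FUTEX_WAIT entries start the clock: everything hinges on masking the op argument and pairing enter/exit per tid. A self-contained rehearsal of that pairing, with the futex constants written out (the numeric values mirror the kernel UAPI, which is what perf's Util helpers expose) and a simplified stand-in for add_stats():

# Self-contained rehearsal of the futex-contention pairing logic.
FUTEX_WAIT = 0
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

blocktime = {}
waits = {}  # (tid, uaddr) -> list of elapsed ns

def enter(tid, uaddr, op, t_ns):
    if op & FUTEX_CMD_MASK == FUTEX_WAIT:  # ignore WAKE etc.
        blocktime[tid] = (uaddr, t_ns)

def exit_(tid, t_ns):
    if tid in blocktime:
        uaddr, t0 = blocktime.pop(tid)
        waits.setdefault((tid, uaddr), []).append(t_ns - t0)

# One private FUTEX_WAIT (op = 0 | 128) that blocked for 5000 ns:
enter(1234, 0x7f00, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 1000)
exit_(1234, 6000)
print(waits)  # {(1234, 32512): [5000]}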
#!/usr/bin/env python # SPDX-License-Identifier: GPL-2.0 # exported-sql-viewer.py: view data from sql database # Copyright (c) 2014-2018, Intel Corporation. # To use this script you will need to have exported data using either the # export-to-sqlite.py or the export-to-postgresql.py script. Refer to those # scripts for details. # # Following on from the example in the export scripts, a # call-graph can be displayed for the pt_example database like this: # # python tools/perf/scripts/python/exported-sql-viewer.py pt_example # # Note that for PostgreSQL, this script supports connecting to remote databases # by setting hostname, port, username, password, and dbname e.g. # # python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example" # # The result is a GUI window with a tree representing a context-sensitive # call-graph. Expanding a couple of levels of the tree and adjusting column # widths to suit will display something like: # # Call Graph: pt_example # Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%) # v- ls # v- 2638:2638 # v- _start ld-2.19.so 1 10074071 100.0 211135 100.0 # |- unknown unknown 1 13198 0.1 1 0.0 # >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3 # >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3 # v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4 # >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1 # >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0 # >- __libc_csu_init ls 1 10354 0.1 10 0.0 # |- _setjmp libc-2.19.so 1 0 0.0 4 0.0 # v- main ls 1 8182043 99.6 180254 99.9 # # Points to note: # The top level is a command name (comm) # The next level is a thread (pid:tid) # Subsequent levels are functions # 'Count' is the number of calls # 'Time' is the elapsed time until the function returns # Percentages are relative to the level above # 'Branch Count' is the total number of branches for that function and all # functions that it calls # There is also a "All branches" report, which displays branches and # possibly disassembly. However, presently, the only supported disassembler is # Intel XED, and additionally the object code must be present in perf build ID # cache. To use Intel XED, libxed.so must be present. 
# To build and install
# libxed.so:
#
#	git clone https://github.com/intelxed/mbuild.git mbuild
#	git clone https://github.com/intelxed/xed
#	cd xed
#	./mfile.py --share
#	sudo ./mfile.py --prefix=/usr/local install
#	sudo ldconfig
#
# Example report:
#
# Time           CPU  Command  PID    TID    Branch Type            In Tx  Branch
# 8107675239590  2    ls       22011  22011  return from interrupt  No     ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
#                                                                              7fab593ea260 48 89 e7                mov %rsp, %rdi
# 8107675239899  2    ls       22011  22011  hardware interrupt     No     7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900  2    ls       22011  22011  return from interrupt  No     ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
#                                                                              7fab593ea260 48 89 e7                mov %rsp, %rdi
#                                                                              7fab593ea263 e8 c8 06 00 00          callq 0x7fab593ea930
# 8107675241900  2    ls       22011  22011  call                   No     7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
#                                                                              7fab593ea930 55                      pushq %rbp
#                                                                              7fab593ea931 48 89 e5                mov %rsp, %rbp
#                                                                              7fab593ea934 41 57                   pushq %r15
#                                                                              7fab593ea936 41 56                   pushq %r14
#                                                                              7fab593ea938 41 55                   pushq %r13
#                                                                              7fab593ea93a 41 54                   pushq %r12
#                                                                              7fab593ea93c 53                      pushq %rbx
#                                                                              7fab593ea93d 48 89 fb                mov %rdi, %rbx
#                                                                              7fab593ea940 48 83 ec 68             sub $0x68, %rsp
#                                                                              7fab593ea944 0f 31                   rdtsc
#                                                                              7fab593ea946 48 c1 e2 20             shl $0x20, %rdx
#                                                                              7fab593ea94a 89 c0                   mov %eax, %eax
#                                                                              7fab593ea94c 48 09 c2                or %rax, %rdx
#                                                                              7fab593ea94f 48 8b 05 1a 15 22 00    movq 0x22151a(%rip), %rax
# 8107675242232  2    ls       22011  22011  hardware interrupt     No     7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900  2    ls       22011  22011  return from interrupt  No     ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
#                                                                              7fab593ea94f 48 8b 05 1a 15 22 00    movq 0x22151a(%rip), %rax
#                                                                              7fab593ea956 48 89 15 3b 13 22 00    movq %rdx, 0x22133b(%rip)
# 8107675243232  2    ls       22011  22011  hardware interrupt     No     7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])

from __future__ import print_function

import sys
# Only change warnings if the python -W option was not used
if not sys.warnoptions:
    import warnings
    # PySide2 causes deprecation warnings, ignore them.
    warnings.filterwarnings("ignore", category=DeprecationWarning)

import argparse
import weakref
import threading
import string

try:
    # Python2
    import cPickle as pickle
    # size of pickled integer big enough for record size
    glb_nsz = 8
except ImportError:
    import pickle
    glb_nsz = 16

import re
import os
import random
import copy
import math

from libxed import LibXED

pyside_version_1 = True
if not "--pyside-version-1" in sys.argv:
    try:
        from PySide2.QtCore import *
        from PySide2.QtGui import *
        from PySide2.QtSql import *
        from PySide2.QtWidgets import *
        pyside_version_1 = False
    except:
        pass

if pyside_version_1:
    from PySide.QtCore import *
    from PySide.QtGui import *
    from PySide.QtSql import *

from decimal import Decimal, ROUND_HALF_UP
from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
                   c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
from multiprocessing import Process, Array, Value, Event

# xrange is range in Python3
try:
    xrange
except NameError:
    xrange = range

def printerr(*args, **keyword_args):
    print(*args, file=sys.stderr, **keyword_args)

# Data formatting helpers

def tohex(ip):
    if ip < 0:
        ip += 1 << 64
    return "%x" % ip

def offstr(offset):
    if offset:
        return "+0x%x" % offset
    return ""

def dsoname(name):
    if name == "[kernel.kallsyms]":
        return "[kernel]"
    return name

def findnth(s, sub, n, offs=0):
    pos = s.find(sub)
    if pos < 0:
        return pos
    if n <= 1:
        return offs + pos
    return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)

# Percent to one decimal place

def PercentToOneDP(n, d):
    if not d:
        return "0.0"
    x = (n * Decimal(100)) / d
    return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))

# Helper for queries that must not fail

def QueryExec(query, stmt):
    ret = query.exec_(stmt)
    if not ret:
        raise Exception("Query failed: " + query.lastError().text())

# Background thread

class Thread(QThread):

    done = Signal(object)

    def __init__(self, task, param=None, parent=None):
        super(Thread, self).__init__(parent)
        self.task = task
        self.param = param

    def run(self):
        while True:
            if self.param is None:
                done, result = self.task()
            else:
                done, result = self.task(self.param)
            self.done.emit(result)
            if done:
                break

# Tree data model

class TreeModel(QAbstractItemModel):

    def __init__(self, glb, params, parent=None):
        super(TreeModel, self).__init__(parent)
        self.glb = glb
        self.params = params
        self.root = self.GetRoot()
        self.last_row_read = 0

    def Item(self, parent):
        if parent.isValid():
            return parent.internalPointer()
        else:
            return self.root

    def rowCount(self, parent):
        result = self.Item(parent).childCount()
        if result < 0:
            result = 0
            self.dataChanged.emit(parent, parent)
        return result

    def hasChildren(self, parent):
        return self.Item(parent).hasChildren()

    def headerData(self, section, orientation, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(section)
        if role != Qt.DisplayRole:
            return None
        if orientation != Qt.Horizontal:
            return None
        return self.columnHeader(section)

    def parent(self, child):
        child_item = child.internalPointer()
        if child_item is self.root:
            return QModelIndex()
        parent_item = child_item.getParentItem()
        return self.createIndex(parent_item.getRow(), 0, parent_item)

    def index(self, row, column, parent):
        child_item = self.Item(parent).getChildItem(row)
        return self.createIndex(row, column, child_item)

    def DisplayData(self, item, index):
        return item.getData(index.column())

    def FetchIfNeeded(self, row):
        if row > self.last_row_read:
            self.last_row_read = row
            if row + 10 >= self.root.child_count:
                self.fetcher.Fetch(glb_chunk_sz)

    def columnAlignment(self, column):
        return Qt.AlignLeft

    def columnFont(self, column):
        return None

    def data(self, index, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(index.column())
        if role == Qt.FontRole:
            return self.columnFont(index.column())
        if role != Qt.DisplayRole:
            return None
        item = index.internalPointer()
        return self.DisplayData(item, index)

# Table data model

class TableModel(QAbstractTableModel):

    def __init__(self, parent=None):
        super(TableModel, self).__init__(parent)
        self.child_count = 0
        self.child_items = []
        self.last_row_read = 0

    def Item(self, parent):
        if parent.isValid():
            return parent.internalPointer()
        else:
            return self

    def rowCount(self, parent):
        return self.child_count

    def headerData(self, section, orientation, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(section)
        if role != Qt.DisplayRole:
            return None
        if orientation != Qt.Horizontal:
            return None
        return self.columnHeader(section)

    def index(self, row, column, parent):
        return self.createIndex(row, column, self.child_items[row])

    def DisplayData(self, item, index):
        return item.getData(index.column())

    def FetchIfNeeded(self, row):
        if row > self.last_row_read:
            self.last_row_read = row
            if row + 10 >= self.child_count:
                self.fetcher.Fetch(glb_chunk_sz)

    def columnAlignment(self, column):
        return Qt.AlignLeft

    def columnFont(self, column):
        return None

    def data(self, index, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(index.column())
        if role == Qt.FontRole:
            return self.columnFont(index.column())
        if role != Qt.DisplayRole:
            return None
        item = index.internalPointer()
        return self.DisplayData(item, index)

# Model cache

model_cache = weakref.WeakValueDictionary()
model_cache_lock = threading.Lock()

def LookupCreateModel(model_name, create_fn):
    model_cache_lock.acquire()
    try:
        model = model_cache[model_name]
    except:
        model = None
    if model is None:
        model = create_fn()
        model_cache[model_name] = model
    model_cache_lock.release()
    return model

def LookupModel(model_name):
    model_cache_lock.acquire()
    try:
        model = model_cache[model_name]
    except:
        model = None
    model_cache_lock.release()
    return model

# Find bar

class FindBar():

    def __init__(self, parent, finder, is_reg_expr=False):
        self.finder = finder
        self.context = []
        self.last_value = None
        self.last_pattern = None

        label = QLabel("Find:")
        label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)

        self.textbox = QComboBox()
        self.textbox.setEditable(True)
        self.textbox.currentIndexChanged.connect(self.ValueChanged)

        self.progress = QProgressBar()
        self.progress.setRange(0, 0)
        self.progress.hide()

        if is_reg_expr:
            self.pattern = QCheckBox("Regular Expression")
        else:
            self.pattern = QCheckBox("Pattern")
        self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)

        self.next_button = QToolButton()
        self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
        self.next_button.released.connect(lambda: self.NextPrev(1))

        self.prev_button = QToolButton()
        self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
        self.prev_button.released.connect(lambda: self.NextPrev(-1))

        self.close_button = QToolButton()
        self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
        self.close_button.released.connect(self.Deactivate)

        self.hbox = QHBoxLayout()
        self.hbox.setContentsMargins(0, 0, 0, 0)

        self.hbox.addWidget(label)
        self.hbox.addWidget(self.textbox)
        self.hbox.addWidget(self.progress)
        self.hbox.addWidget(self.pattern)
        self.hbox.addWidget(self.next_button)
        self.hbox.addWidget(self.prev_button)
        self.hbox.addWidget(self.close_button)
self.bar = QWidget() self.bar.setLayout(self.hbox) self.bar.hide() def Widget(self): return self.bar def Activate(self): self.bar.show() self.textbox.lineEdit().selectAll() self.textbox.setFocus() def Deactivate(self): self.bar.hide() def Busy(self): self.textbox.setEnabled(False) self.pattern.hide() self.next_button.hide() self.prev_button.hide() self.progress.show() def Idle(self): self.textbox.setEnabled(True) self.progress.hide() self.pattern.show() self.next_button.show() self.prev_button.show() def Find(self, direction): value = self.textbox.currentText() pattern = self.pattern.isChecked() self.last_value = value self.last_pattern = pattern self.finder.Find(value, direction, pattern, self.context) def ValueChanged(self): value = self.textbox.currentText() pattern = self.pattern.isChecked() index = self.textbox.currentIndex() data = self.textbox.itemData(index) # Store the pattern in the combo box to keep it with the text value if data == None: self.textbox.setItemData(index, pattern) else: self.pattern.setChecked(data) self.Find(0) def NextPrev(self, direction): value = self.textbox.currentText() pattern = self.pattern.isChecked() if value != self.last_value: index = self.textbox.findText(value) # Allow for a button press before the value has been added to the combo box if index < 0: index = self.textbox.count() self.textbox.addItem(value, pattern) self.textbox.setCurrentIndex(index) return else: self.textbox.setItemData(index, pattern) elif pattern != self.last_pattern: # Keep the pattern recorded in the combo box up to date index = self.textbox.currentIndex() self.textbox.setItemData(index, pattern) self.Find(direction) def NotFound(self): QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found") # Context-sensitive call graph data model item base class CallGraphLevelItemBase(object): def __init__(self, glb, params, row, parent_item): self.glb = glb self.params = params self.row = row self.parent_item = parent_item self.query_done = False self.child_count = 0 self.child_items = [] if parent_item: self.level = parent_item.level + 1 else: self.level = 0 def getChildItem(self, row): return self.child_items[row] def getParentItem(self): return self.parent_item def getRow(self): return self.row def childCount(self): if not self.query_done: self.Select() if not self.child_count: return -1 return self.child_count def hasChildren(self): if not self.query_done: return True return self.child_count > 0 def getData(self, column): return self.data[column] # Context-sensitive call graph data model level 2+ item base class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase): def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item): super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item) self.comm_id = comm_id self.thread_id = thread_id self.call_path_id = call_path_id self.insn_cnt = insn_cnt self.cyc_cnt = cyc_cnt self.branch_count = branch_count self.time = time def Select(self): self.query_done = True query = QSqlQuery(self.glb.db) if self.params.have_ipc: ipc_str = ", SUM(insn_count), SUM(cyc_count)" else: ipc_str = "" QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)" " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" " INNER JOIN dsos ON symbols.dso_id = dsos.id" " WHERE parent_call_path_id = " 
+ str(self.call_path_id) + " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id) + " GROUP BY call_path_id, name, short_name" " ORDER BY call_path_id") while query.next(): if self.params.have_ipc: insn_cnt = int(query.value(5)) cyc_cnt = int(query.value(6)) branch_count = int(query.value(7)) else: insn_cnt = 0 cyc_cnt = 0 branch_count = int(query.value(5)) child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self) self.child_items.append(child_item) self.child_count += 1 # Context-sensitive call graph data model level three item class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase): def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item): super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item) dso = dsoname(dso) if self.params.have_ipc: insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt) cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt) br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count) ipc = CalcIPC(cyc_cnt, insn_cnt) self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ] else: self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] self.dbid = call_path_id # Context-sensitive call graph data model level two item class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase): def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item): super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item) if self.params.have_ipc: self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""] else: self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] self.dbid = thread_id def Select(self): super(CallGraphLevelTwoItem, self).Select() for child_item in self.child_items: self.time += child_item.time self.insn_cnt += child_item.insn_cnt self.cyc_cnt += child_item.cyc_cnt self.branch_count += child_item.branch_count for child_item in self.child_items: child_item.data[4] = PercentToOneDP(child_item.time, self.time) if self.params.have_ipc: child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt) child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt) child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count) else: child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) # Context-sensitive call graph data model level one item class CallGraphLevelOneItem(CallGraphLevelItemBase): def __init__(self, glb, params, row, comm_id, comm, parent_item): super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item) if self.params.have_ipc: self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""] else: self.data = [comm, "", "", "", "", "", ""] self.dbid = comm_id def Select(self): self.query_done = True query = QSqlQuery(self.glb.db) QueryExec(query, "SELECT thread_id, pid, tid" " FROM comm_threads" " INNER JOIN threads ON thread_id = threads.id" " WHERE comm_id = " + str(self.dbid)) while query.next(): 
child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) self.child_items.append(child_item) self.child_count += 1 # Context-sensitive call graph data model root item class CallGraphRootItem(CallGraphLevelItemBase): def __init__(self, glb, params): super(CallGraphRootItem, self).__init__(glb, params, 0, None) self.dbid = 0 self.query_done = True if_has_calls = "" if IsSelectable(glb.db, "comms", columns = "has_calls"): if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE query = QSqlQuery(glb.db) QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls) while query.next(): if not query.value(0): continue child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self) self.child_items.append(child_item) self.child_count += 1 # Call graph model parameters class CallGraphModelParams(): def __init__(self, glb, parent=None): self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count") # Context-sensitive call graph data model base class CallGraphModelBase(TreeModel): def __init__(self, glb, parent=None): super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent) def FindSelect(self, value, pattern, query): if pattern: # postgresql and sqlite pattern patching differences: # postgresql LIKE is case sensitive but sqlite LIKE is not # postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not # postgresql supports ILIKE which is case insensitive # sqlite supports GLOB (text only) which uses * and ? and is case sensitive if not self.glb.dbref.is_sqlite3: # Escape % and _ s = value.replace("%", "\%") s = s.replace("_", "\_") # Translate * and ? into SQL LIKE pattern characters % and _ trans = string.maketrans("*?", "%_") match = " LIKE '" + str(s).translate(trans) + "'" else: match = " GLOB '" + str(value) + "'" else: match = " = '" + str(value) + "'" self.DoFindSelect(query, match) def Found(self, query, found): if found: return self.FindPath(query) return [] def FindValue(self, value, pattern, query, last_value, last_pattern): if last_value == value and pattern == last_pattern: found = query.first() else: self.FindSelect(value, pattern, query) found = query.next() return self.Found(query, found) def FindNext(self, query): found = query.next() if not found: found = query.first() return self.Found(query, found) def FindPrev(self, query): found = query.previous() if not found: found = query.last() return self.Found(query, found) def FindThread(self, c): if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern: ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern) elif c.direction > 0: ids = self.FindNext(c.query) else: ids = self.FindPrev(c.query) return (True, ids) def Find(self, value, direction, pattern, context, callback): class Context(): def __init__(self, *x): self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x def Update(self, *x): self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern) if len(context): context[0].Update(value, direction, pattern) else: context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None)) # Use a thread so the UI is not blocked during the SELECT thread = Thread(self.FindThread, context[0]) thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection) thread.start() def 
FindDone(self, thread, callback, ids): callback(ids) # Context-sensitive call graph data model class CallGraphModel(CallGraphModelBase): def __init__(self, glb, parent=None): super(CallGraphModel, self).__init__(glb, parent) def GetRoot(self): return CallGraphRootItem(self.glb, self.params) def columnCount(self, parent=None): if self.params.have_ipc: return 12 else: return 7 def columnHeader(self, column): if self.params.have_ipc: headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "] else: headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] return headers[column] def columnAlignment(self, column): if self.params.have_ipc: alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] else: alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] return alignment[column] def DoFindSelect(self, query, match): QueryExec(query, "SELECT call_path_id, comm_id, thread_id" " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" " WHERE calls.id <> 0" " AND symbols.name" + match + " GROUP BY comm_id, thread_id, call_path_id" " ORDER BY comm_id, thread_id, call_path_id") def FindPath(self, query): # Turn the query result into a list of ids that the tree view can walk # to open the tree at the right place. ids = [] parent_id = query.value(0) while parent_id: ids.insert(0, parent_id) q2 = QSqlQuery(self.glb.db) QueryExec(q2, "SELECT parent_id" " FROM call_paths" " WHERE id = " + str(parent_id)) if not q2.next(): break parent_id = q2.value(0) # The call path root is not used if ids[0] == 1: del ids[0] ids.insert(0, query.value(2)) ids.insert(0, query.value(1)) return ids # Call tree data model level 2+ item base class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase): def __init__(self, glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item): super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item) self.comm_id = comm_id self.thread_id = thread_id self.calls_id = calls_id self.call_time = call_time self.time = time self.insn_cnt = insn_cnt self.cyc_cnt = cyc_cnt self.branch_count = branch_count def Select(self): self.query_done = True if self.calls_id == 0: comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id) else: comm_thread = "" if self.params.have_ipc: ipc_str = ", insn_count, cyc_count" else: ipc_str = "" query = QSqlQuery(self.glb.db) QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count" " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" " INNER JOIN dsos ON symbols.dso_id = dsos.id" " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread + " ORDER BY call_time, calls.id") while query.next(): if self.params.have_ipc: insn_cnt = int(query.value(5)) cyc_cnt = int(query.value(6)) branch_count = int(query.value(7)) else: insn_cnt = 0 cyc_cnt = 0 branch_count = int(query.value(5)) child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, 
query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self) self.child_items.append(child_item) self.child_count += 1 # Call tree data model level three item class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase): def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item): super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item) dso = dsoname(dso) if self.params.have_ipc: insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt) cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt) br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count) ipc = CalcIPC(cyc_cnt, insn_cnt) self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ] else: self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] self.dbid = calls_id # Call tree data model level two item class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase): def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item): super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, 0, parent_item) if self.params.have_ipc: self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""] else: self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] self.dbid = thread_id def Select(self): super(CallTreeLevelTwoItem, self).Select() for child_item in self.child_items: self.time += child_item.time self.insn_cnt += child_item.insn_cnt self.cyc_cnt += child_item.cyc_cnt self.branch_count += child_item.branch_count for child_item in self.child_items: child_item.data[4] = PercentToOneDP(child_item.time, self.time) if self.params.have_ipc: child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt) child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt) child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count) else: child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) # Call tree data model level one item class CallTreeLevelOneItem(CallGraphLevelItemBase): def __init__(self, glb, params, row, comm_id, comm, parent_item): super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item) if self.params.have_ipc: self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""] else: self.data = [comm, "", "", "", "", "", ""] self.dbid = comm_id def Select(self): self.query_done = True query = QSqlQuery(self.glb.db) QueryExec(query, "SELECT thread_id, pid, tid" " FROM comm_threads" " INNER JOIN threads ON thread_id = threads.id" " WHERE comm_id = " + str(self.dbid)) while query.next(): child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) self.child_items.append(child_item) self.child_count += 1 # Call tree data model root item class CallTreeRootItem(CallGraphLevelItemBase): def __init__(self, glb, params): super(CallTreeRootItem, self).__init__(glb, params, 0, None) self.dbid = 0 self.query_done = True if_has_calls = "" if IsSelectable(glb.db, "comms", columns = "has_calls"): if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE query = 
QSqlQuery(glb.db) QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls) while query.next(): if not query.value(0): continue child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self) self.child_items.append(child_item) self.child_count += 1 # Call Tree data model class CallTreeModel(CallGraphModelBase): def __init__(self, glb, parent=None): super(CallTreeModel, self).__init__(glb, parent) def GetRoot(self): return CallTreeRootItem(self.glb, self.params) def columnCount(self, parent=None): if self.params.have_ipc: return 12 else: return 7 def columnHeader(self, column): if self.params.have_ipc: headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "] else: headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] return headers[column] def columnAlignment(self, column): if self.params.have_ipc: alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] else: alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] return alignment[column] def DoFindSelect(self, query, match): QueryExec(query, "SELECT calls.id, comm_id, thread_id" " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" " WHERE calls.id <> 0" " AND symbols.name" + match + " ORDER BY comm_id, thread_id, call_time, calls.id") def FindPath(self, query): # Turn the query result into a list of ids that the tree view can walk # to open the tree at the right place. 
ids = [] parent_id = query.value(0) while parent_id: ids.insert(0, parent_id) q2 = QSqlQuery(self.glb.db) QueryExec(q2, "SELECT parent_id" " FROM calls" " WHERE id = " + str(parent_id)) if not q2.next(): break parent_id = q2.value(0) ids.insert(0, query.value(2)) ids.insert(0, query.value(1)) return ids # Vertical layout class HBoxLayout(QHBoxLayout): def __init__(self, *children): super(HBoxLayout, self).__init__() self.layout().setContentsMargins(0, 0, 0, 0) for child in children: if child.isWidgetType(): self.layout().addWidget(child) else: self.layout().addLayout(child) # Horizontal layout class VBoxLayout(QVBoxLayout): def __init__(self, *children): super(VBoxLayout, self).__init__() self.layout().setContentsMargins(0, 0, 0, 0) for child in children: if child.isWidgetType(): self.layout().addWidget(child) else: self.layout().addLayout(child) # Vertical layout widget class VBox(): def __init__(self, *children): self.vbox = QWidget() self.vbox.setLayout(VBoxLayout(*children)) def Widget(self): return self.vbox # Tree window base class TreeWindowBase(QMdiSubWindow): def __init__(self, parent=None): super(TreeWindowBase, self).__init__(parent) self.model = None self.find_bar = None self.view = QTreeView() self.view.setSelectionMode(QAbstractItemView.ContiguousSelection) self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard self.context_menu = TreeContextMenu(self.view) def DisplayFound(self, ids): if not len(ids): return False parent = QModelIndex() for dbid in ids: found = False n = self.model.rowCount(parent) for row in xrange(n): child = self.model.index(row, 0, parent) if child.internalPointer().dbid == dbid: found = True self.view.setExpanded(parent, True) self.view.setCurrentIndex(child) parent = child break if not found: break return found def Find(self, value, direction, pattern, context): self.view.setFocus() self.find_bar.Busy() self.model.Find(value, direction, pattern, context, self.FindDone) def FindDone(self, ids): found = True if not self.DisplayFound(ids): found = False self.find_bar.Idle() if not found: self.find_bar.NotFound() # Context-sensitive call graph window class CallGraphWindow(TreeWindowBase): def __init__(self, glb, parent=None): super(CallGraphWindow, self).__init__(parent) self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x)) self.view.setModel(self.model) for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)): self.view.setColumnWidth(c, w) self.find_bar = FindBar(self, self) self.vbox = VBox(self.view, self.find_bar.Widget()) self.setWidget(self.vbox.Widget()) AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph") # Call tree window class CallTreeWindow(TreeWindowBase): def __init__(self, glb, parent=None, thread_at_time=None): super(CallTreeWindow, self).__init__(parent) self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x)) self.view.setModel(self.model) for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)): self.view.setColumnWidth(c, w) self.find_bar = FindBar(self, self) self.vbox = VBox(self.view, self.find_bar.Widget()) self.setWidget(self.vbox.Widget()) AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree") if thread_at_time: self.DisplayThreadAtTime(*thread_at_time) def DisplayThreadAtTime(self, comm_id, thread_id, time): parent = QModelIndex() for dbid in (comm_id, thread_id): found = False n = self.model.rowCount(parent) for row in xrange(n): child = self.model.index(row, 0, parent) if child.internalPointer().dbid == dbid: 
found = True self.view.setExpanded(parent, True) self.view.setCurrentIndex(child) parent = child break if not found: return found = False while True: n = self.model.rowCount(parent) if not n: return last_child = None for row in xrange(n): self.view.setExpanded(parent, True) child = self.model.index(row, 0, parent) child_call_time = child.internalPointer().call_time if child_call_time < time: last_child = child elif child_call_time == time: self.view.setCurrentIndex(child) return elif child_call_time > time: break if not last_child: if not found: child = self.model.index(0, 0, parent) self.view.setExpanded(parent, True) self.view.setCurrentIndex(child) return found = True self.view.setExpanded(parent, True) self.view.setCurrentIndex(last_child) parent = last_child # ExecComm() gets the comm_id of the command string that was set when the process exec'd i.e. the program name def ExecComm(db, thread_id, time): query = QSqlQuery(db) QueryExec(query, "SELECT comm_threads.comm_id, comms.c_time, comms.exec_flag" " FROM comm_threads" " INNER JOIN comms ON comms.id = comm_threads.comm_id" " WHERE comm_threads.thread_id = " + str(thread_id) + " ORDER BY comms.c_time, comms.id") first = None last = None while query.next(): if first is None: first = query.value(0) if query.value(2) and Decimal(query.value(1)) <= Decimal(time): last = query.value(0) if not(last is None): return last return first # Container for (x, y) data class XY(): def __init__(self, x=0, y=0): self.x = x self.y = y def __str__(self): return "XY({}, {})".format(str(self.x), str(self.y)) # Container for sub-range data class Subrange(): def __init__(self, lo=0, hi=0): self.lo = lo self.hi = hi def __str__(self): return "Subrange({}, {})".format(str(self.lo), str(self.hi)) # Graph data region base class class GraphDataRegion(object): def __init__(self, key, title = "", ordinal = ""): self.key = key self.title = title self.ordinal = ordinal # Function to sort GraphDataRegion def GraphDataRegionOrdinal(data_region): return data_region.ordinal # Attributes for a graph region class GraphRegionAttribute(): def __init__(self, colour): self.colour = colour # Switch graph data region represents a task class SwitchGraphDataRegion(GraphDataRegion): def __init__(self, key, exec_comm_id, pid, tid, comm, thread_id, comm_id): super(SwitchGraphDataRegion, self).__init__(key) self.title = str(pid) + " / " + str(tid) + " " + comm # Order graph legend within exec comm by pid / tid / time self.ordinal = str(pid).rjust(16) + str(exec_comm_id).rjust(8) + str(tid).rjust(16) self.exec_comm_id = exec_comm_id self.pid = pid self.tid = tid self.comm = comm self.thread_id = thread_id self.comm_id = comm_id # Graph data point class GraphDataPoint(): def __init__(self, data, index, x, y, altx=None, alty=None, hregion=None, vregion=None): self.data = data self.index = index self.x = x self.y = y self.altx = altx self.alty = alty self.hregion = hregion self.vregion = vregion # Graph data (single graph) base class class GraphData(object): def __init__(self, collection, xbase=Decimal(0), ybase=Decimal(0)): self.collection = collection self.points = [] self.xbase = xbase self.ybase = ybase self.title = "" def AddPoint(self, x, y, altx=None, alty=None, hregion=None, vregion=None): index = len(self.points) x = float(Decimal(x) - self.xbase) y = float(Decimal(y) - self.ybase) self.points.append(GraphDataPoint(self, index, x, y, altx, alty, hregion, vregion)) def XToData(self, x): return Decimal(x) + self.xbase def YToData(self, y): return Decimal(y) + self.ybase # Switch 
graph data (for one CPU) class SwitchGraphData(GraphData): def __init__(self, db, collection, cpu, xbase): super(SwitchGraphData, self).__init__(collection, xbase) self.cpu = cpu self.title = "CPU " + str(cpu) self.SelectSwitches(db) def SelectComms(self, db, thread_id, last_comm_id, start_time, end_time): query = QSqlQuery(db) QueryExec(query, "SELECT id, c_time" " FROM comms" " WHERE c_thread_id = " + str(thread_id) + " AND exec_flag = " + self.collection.glb.dbref.TRUE + " AND c_time >= " + str(start_time) + " AND c_time <= " + str(end_time) + " ORDER BY c_time, id") while query.next(): comm_id = query.value(0) if comm_id == last_comm_id: continue time = query.value(1) hregion = self.HRegion(db, thread_id, comm_id, time) self.AddPoint(time, 1000, None, None, hregion) def SelectSwitches(self, db): last_time = None last_comm_id = None last_thread_id = None query = QSqlQuery(db) QueryExec(query, "SELECT time, thread_out_id, thread_in_id, comm_out_id, comm_in_id, flags" " FROM context_switches" " WHERE machine_id = " + str(self.collection.machine_id) + " AND cpu = " + str(self.cpu) + " ORDER BY time, id") while query.next(): flags = int(query.value(5)) if flags & 1: # Schedule-out: detect and add exec's if last_thread_id == query.value(1) and last_comm_id is not None and last_comm_id != query.value(3): self.SelectComms(db, last_thread_id, last_comm_id, last_time, query.value(0)) continue # Schedule-in: add data point if len(self.points) == 0: start_time = self.collection.glb.StartTime(self.collection.machine_id) hregion = self.HRegion(db, query.value(1), query.value(3), start_time) self.AddPoint(start_time, 1000, None, None, hregion) time = query.value(0) comm_id = query.value(4) thread_id = query.value(2) hregion = self.HRegion(db, thread_id, comm_id, time) self.AddPoint(time, 1000, None, None, hregion) last_time = time last_comm_id = comm_id last_thread_id = thread_id def NewHRegion(self, db, key, thread_id, comm_id, time): exec_comm_id = ExecComm(db, thread_id, time) query = QSqlQuery(db) QueryExec(query, "SELECT pid, tid FROM threads WHERE id = " + str(thread_id)) if query.next(): pid = query.value(0) tid = query.value(1) else: pid = -1 tid = -1 query = QSqlQuery(db) QueryExec(query, "SELECT comm FROM comms WHERE id = " + str(comm_id)) if query.next(): comm = query.value(0) else: comm = "" return SwitchGraphDataRegion(key, exec_comm_id, pid, tid, comm, thread_id, comm_id) def HRegion(self, db, thread_id, comm_id, time): key = str(thread_id) + ":" + str(comm_id) hregion = self.collection.LookupHRegion(key) if hregion is None: hregion = self.NewHRegion(db, key, thread_id, comm_id, time) self.collection.AddHRegion(key, hregion) return hregion # Graph data collection (multiple related graphs) base class class GraphDataCollection(object): def __init__(self, glb): self.glb = glb self.data = [] self.hregions = {} self.xrangelo = None self.xrangehi = None self.yrangelo = None self.yrangehi = None self.dp = XY(0, 0) def AddGraphData(self, data): self.data.append(data) def LookupHRegion(self, key): if key in self.hregions: return self.hregions[key] return None def AddHRegion(self, key, hregion): self.hregions[key] = hregion # Switch graph data collection (SwitchGraphData for each CPU) class SwitchGraphDataCollection(GraphDataCollection): def __init__(self, glb, db, machine_id): super(SwitchGraphDataCollection, self).__init__(glb) self.machine_id = machine_id self.cpus = self.SelectCPUs(db) self.xrangelo = glb.StartTime(machine_id) self.xrangehi = glb.FinishTime(machine_id) self.yrangelo = 
Decimal(0) self.yrangehi = Decimal(1000) for cpu in self.cpus: self.AddGraphData(SwitchGraphData(db, self, cpu, self.xrangelo)) def SelectCPUs(self, db): cpus = [] query = QSqlQuery(db) QueryExec(query, "SELECT DISTINCT cpu" " FROM context_switches" " WHERE machine_id = " + str(self.machine_id)) while query.next(): cpus.append(int(query.value(0))) return sorted(cpus) # Switch graph data graphics item displays the graphed data class SwitchGraphDataGraphicsItem(QGraphicsItem): def __init__(self, data, graph_width, graph_height, attrs, event_handler, parent=None): super(SwitchGraphDataGraphicsItem, self).__init__(parent) self.data = data self.graph_width = graph_width self.graph_height = graph_height self.attrs = attrs self.event_handler = event_handler self.setAcceptHoverEvents(True) def boundingRect(self): return QRectF(0, 0, self.graph_width, self.graph_height) def PaintPoint(self, painter, last, x): if not(last is None or last.hregion.pid == 0 or x < self.attrs.subrange.x.lo): if last.x < self.attrs.subrange.x.lo: x0 = self.attrs.subrange.x.lo else: x0 = last.x if x > self.attrs.subrange.x.hi: x1 = self.attrs.subrange.x.hi else: x1 = x - 1 x0 = self.attrs.XToPixel(x0) x1 = self.attrs.XToPixel(x1) y0 = self.attrs.YToPixel(last.y) colour = self.attrs.region_attributes[last.hregion.key].colour width = x1 - x0 + 1 if width < 2: painter.setPen(colour) painter.drawLine(x0, self.graph_height - y0, x0, self.graph_height) else: painter.fillRect(x0, self.graph_height - y0, width, self.graph_height - 1, colour) def paint(self, painter, option, widget): last = None for point in self.data.points: self.PaintPoint(painter, last, point.x) if point.x > self.attrs.subrange.x.hi: break; last = point self.PaintPoint(painter, last, self.attrs.subrange.x.hi + 1) def BinarySearchPoint(self, target): lower_pos = 0 higher_pos = len(self.data.points) while True: pos = int((lower_pos + higher_pos) / 2) val = self.data.points[pos].x if target >= val: lower_pos = pos else: higher_pos = pos if higher_pos <= lower_pos + 1: return lower_pos def XPixelToData(self, x): x = self.attrs.PixelToX(x) if x < self.data.points[0].x: x = 0 pos = 0 low = True else: pos = self.BinarySearchPoint(x) low = False return (low, pos, self.data.XToData(x)) def EventToData(self, event): no_data = (None,) * 4 if len(self.data.points) < 1: return no_data x = event.pos().x() if x < 0: return no_data low0, pos0, time_from = self.XPixelToData(x) low1, pos1, time_to = self.XPixelToData(x + 1) hregions = set() hregion_times = [] if not low1: for i in xrange(pos0, pos1 + 1): hregion = self.data.points[i].hregion hregions.add(hregion) if i == pos0: time = time_from else: time = self.data.XToData(self.data.points[i].x) hregion_times.append((hregion, time)) return (time_from, time_to, hregions, hregion_times) def hoverMoveEvent(self, event): time_from, time_to, hregions, hregion_times = self.EventToData(event) if time_from is not None: self.event_handler.PointEvent(self.data.cpu, time_from, time_to, hregions) def hoverLeaveEvent(self, event): self.event_handler.NoPointEvent() def mousePressEvent(self, event): if event.button() != Qt.RightButton: super(SwitchGraphDataGraphicsItem, self).mousePressEvent(event) return time_from, time_to, hregions, hregion_times = self.EventToData(event) if hregion_times: self.event_handler.RightClickEvent(self.data.cpu, hregion_times, event.screenPos()) # X-axis graphics item class XAxisGraphicsItem(QGraphicsItem): def __init__(self, width, parent=None): super(XAxisGraphicsItem, self).__init__(parent) self.width = width 
self.max_mark_sz = 4 self.height = self.max_mark_sz + 1 def boundingRect(self): return QRectF(0, 0, self.width, self.height) def Step(self): attrs = self.parentItem().attrs subrange = attrs.subrange.x t = subrange.hi - subrange.lo s = (3.0 * t) / self.width n = 1.0 while s > n: n = n * 10.0 return n def PaintMarks(self, painter, at_y, lo, hi, step, i): attrs = self.parentItem().attrs x = lo while x <= hi: xp = attrs.XToPixel(x) if i % 10: if i % 5: sz = 1 else: sz = 2 else: sz = self.max_mark_sz i = 0 painter.drawLine(xp, at_y, xp, at_y + sz) x += step i += 1 def paint(self, painter, option, widget): # Using QPainter::drawLine(int x1, int y1, int x2, int y2) so x2 = width -1 painter.drawLine(0, 0, self.width - 1, 0) n = self.Step() attrs = self.parentItem().attrs subrange = attrs.subrange.x if subrange.lo: x_offset = n - (subrange.lo % n) else: x_offset = 0.0 x = subrange.lo + x_offset i = (x / n) % 10 self.PaintMarks(painter, 0, x, subrange.hi, n, i) def ScaleDimensions(self): n = self.Step() attrs = self.parentItem().attrs lo = attrs.subrange.x.lo hi = (n * 10.0) + lo width = attrs.XToPixel(hi) if width > 500: width = 0 return (n, lo, hi, width) def PaintScale(self, painter, at_x, at_y): n, lo, hi, width = self.ScaleDimensions() if not width: return painter.drawLine(at_x, at_y, at_x + width, at_y) self.PaintMarks(painter, at_y, lo, hi, n, 0) def ScaleWidth(self): n, lo, hi, width = self.ScaleDimensions() return width def ScaleHeight(self): return self.height def ScaleUnit(self): return self.Step() * 10 # Scale graphics item base class class ScaleGraphicsItem(QGraphicsItem): def __init__(self, axis, parent=None): super(ScaleGraphicsItem, self).__init__(parent) self.axis = axis def boundingRect(self): scale_width = self.axis.ScaleWidth() if not scale_width: return QRectF() return QRectF(0, 0, self.axis.ScaleWidth() + 100, self.axis.ScaleHeight()) def paint(self, painter, option, widget): scale_width = self.axis.ScaleWidth() if not scale_width: return self.axis.PaintScale(painter, 0, 5) x = scale_width + 4 painter.drawText(QPointF(x, 10), self.Text()) def Unit(self): return self.axis.ScaleUnit() def Text(self): return "" # Switch graph scale graphics item class SwitchScaleGraphicsItem(ScaleGraphicsItem): def __init__(self, axis, parent=None): super(SwitchScaleGraphicsItem, self).__init__(axis, parent) def Text(self): unit = self.Unit() if unit >= 1000000000: unit = int(unit / 1000000000) us = "s" elif unit >= 1000000: unit = int(unit / 1000000) us = "ms" elif unit >= 1000: unit = int(unit / 1000) us = "us" else: unit = int(unit) us = "ns" return " = " + str(unit) + " " + us # Switch graph graphics item contains graph title, scale, x/y-axis, and the graphed data class SwitchGraphGraphicsItem(QGraphicsItem): def __init__(self, collection, data, attrs, event_handler, first, parent=None): super(SwitchGraphGraphicsItem, self).__init__(parent) self.collection = collection self.data = data self.attrs = attrs self.event_handler = event_handler margin = 20 title_width = 50 self.title_graphics = QGraphicsSimpleTextItem(data.title, self) self.title_graphics.setPos(margin, margin) graph_width = attrs.XToPixel(attrs.subrange.x.hi) + 1 graph_height = attrs.YToPixel(attrs.subrange.y.hi) + 1 self.graph_origin_x = margin + title_width + margin self.graph_origin_y = graph_height + margin x_axis_size = 1 y_axis_size = 1 self.yline = QGraphicsLineItem(0, 0, 0, graph_height, self) self.x_axis = XAxisGraphicsItem(graph_width, self) self.x_axis.setPos(self.graph_origin_x, self.graph_origin_y + 1) if first: 
self.scale_item = SwitchScaleGraphicsItem(self.x_axis, self) self.scale_item.setPos(self.graph_origin_x, self.graph_origin_y + 10) self.yline.setPos(self.graph_origin_x - y_axis_size, self.graph_origin_y - graph_height) self.axis_point = QGraphicsLineItem(0, 0, 0, 0, self) self.axis_point.setPos(self.graph_origin_x - 1, self.graph_origin_y +1) self.width = self.graph_origin_x + graph_width + margin self.height = self.graph_origin_y + margin self.graph = SwitchGraphDataGraphicsItem(data, graph_width, graph_height, attrs, event_handler, self) self.graph.setPos(self.graph_origin_x, self.graph_origin_y - graph_height) if parent and 'EnableRubberBand' in dir(parent): parent.EnableRubberBand(self.graph_origin_x, self.graph_origin_x + graph_width - 1, self) def boundingRect(self): return QRectF(0, 0, self.width, self.height) def paint(self, painter, option, widget): pass def RBXToPixel(self, x): return self.attrs.PixelToX(x - self.graph_origin_x) def RBXRangeToPixel(self, x0, x1): return (self.RBXToPixel(x0), self.RBXToPixel(x1 + 1)) def RBPixelToTime(self, x): if x < self.data.points[0].x: return self.data.XToData(0) return self.data.XToData(x) def RBEventTimes(self, x0, x1): x0, x1 = self.RBXRangeToPixel(x0, x1) time_from = self.RBPixelToTime(x0) time_to = self.RBPixelToTime(x1) return (time_from, time_to) def RBEvent(self, x0, x1): time_from, time_to = self.RBEventTimes(x0, x1) self.event_handler.RangeEvent(time_from, time_to) def RBMoveEvent(self, x0, x1): if x1 < x0: x0, x1 = x1, x0 self.RBEvent(x0, x1) def RBReleaseEvent(self, x0, x1, selection_state): if x1 < x0: x0, x1 = x1, x0 x0, x1 = self.RBXRangeToPixel(x0, x1) self.event_handler.SelectEvent(x0, x1, selection_state) # Graphics item to draw a vertical bracket (used to highlight "forward" sub-range) class VerticalBracketGraphicsItem(QGraphicsItem): def __init__(self, parent=None): super(VerticalBracketGraphicsItem, self).__init__(parent) self.width = 0 self.height = 0 self.hide() def SetSize(self, width, height): self.width = width + 1 self.height = height + 1 def boundingRect(self): return QRectF(0, 0, self.width, self.height) def paint(self, painter, option, widget): colour = QColor(255, 255, 0, 32) painter.fillRect(0, 0, self.width, self.height, colour) x1 = self.width - 1 y1 = self.height - 1 painter.drawLine(0, 0, x1, 0) painter.drawLine(0, 0, 0, 3) painter.drawLine(x1, 0, x1, 3) painter.drawLine(0, y1, x1, y1) painter.drawLine(0, y1, 0, y1 - 3) painter.drawLine(x1, y1, x1, y1 - 3) # Graphics item to contain graphs arranged vertically class VertcalGraphSetGraphicsItem(QGraphicsItem): def __init__(self, collection, attrs, event_handler, child_class, parent=None): super(VertcalGraphSetGraphicsItem, self).__init__(parent) self.collection = collection self.top = 10 self.width = 0 self.height = self.top self.rubber_band = None self.rb_enabled = False first = True for data in collection.data: child = child_class(collection, data, attrs, event_handler, first, self) child.setPos(0, self.height + 1) rect = child.boundingRect() if rect.right() > self.width: self.width = rect.right() self.height = self.height + rect.bottom() + 1 first = False self.bracket = VerticalBracketGraphicsItem(self) def EnableRubberBand(self, xlo, xhi, rb_event_handler): if self.rb_enabled: return self.rb_enabled = True self.rb_in_view = False self.setAcceptedMouseButtons(Qt.LeftButton) self.rb_xlo = xlo self.rb_xhi = xhi self.rb_event_handler = rb_event_handler self.mousePressEvent = self.MousePressEvent self.mouseMoveEvent = self.MouseMoveEvent 
self.mouseReleaseEvent = self.MouseReleaseEvent def boundingRect(self): return QRectF(0, 0, self.width, self.height) def paint(self, painter, option, widget): pass def RubberBandParent(self): scene = self.scene() view = scene.views()[0] viewport = view.viewport() return viewport def RubberBandSetGeometry(self, rect): scene_rectf = self.mapRectToScene(QRectF(rect)) scene = self.scene() view = scene.views()[0] poly = view.mapFromScene(scene_rectf) self.rubber_band.setGeometry(poly.boundingRect()) def SetSelection(self, selection_state): if self.rubber_band: if selection_state: self.RubberBandSetGeometry(selection_state) self.rubber_band.show() else: self.rubber_band.hide() def SetBracket(self, rect): if rect: x, y, width, height = rect.x(), rect.y(), rect.width(), rect.height() self.bracket.setPos(x, y) self.bracket.SetSize(width, height) self.bracket.show() else: self.bracket.hide() def RubberBandX(self, event): x = event.pos().toPoint().x() if x < self.rb_xlo: x = self.rb_xlo elif x > self.rb_xhi: x = self.rb_xhi else: self.rb_in_view = True return x def RubberBandRect(self, x): if self.rb_origin.x() <= x: width = x - self.rb_origin.x() rect = QRect(self.rb_origin, QSize(width, self.height)) else: width = self.rb_origin.x() - x top_left = QPoint(self.rb_origin.x() - width, self.rb_origin.y()) rect = QRect(top_left, QSize(width, self.height)) return rect def MousePressEvent(self, event): self.rb_in_view = False x = self.RubberBandX(event) self.rb_origin = QPoint(x, self.top) if self.rubber_band is None: self.rubber_band = QRubberBand(QRubberBand.Rectangle, self.RubberBandParent()) self.RubberBandSetGeometry(QRect(self.rb_origin, QSize(0, self.height))) if self.rb_in_view: self.rubber_band.show() self.rb_event_handler.RBMoveEvent(x, x) else: self.rubber_band.hide() def MouseMoveEvent(self, event): x = self.RubberBandX(event) rect = self.RubberBandRect(x) self.RubberBandSetGeometry(rect) if self.rb_in_view: self.rubber_band.show() self.rb_event_handler.RBMoveEvent(self.rb_origin.x(), x) def MouseReleaseEvent(self, event): x = self.RubberBandX(event) if self.rb_in_view: selection_state = self.RubberBandRect(x) else: selection_state = None self.rb_event_handler.RBReleaseEvent(self.rb_origin.x(), x, selection_state) # Switch graph legend data model class SwitchGraphLegendModel(QAbstractTableModel): def __init__(self, collection, region_attributes, parent=None): super(SwitchGraphLegendModel, self).__init__(parent) self.region_attributes = region_attributes self.child_items = sorted(collection.hregions.values(), key=GraphDataRegionOrdinal) self.child_count = len(self.child_items) self.highlight_set = set() self.column_headers = ("pid", "tid", "comm") def rowCount(self, parent): return self.child_count def headerData(self, section, orientation, role): if role != Qt.DisplayRole: return None if orientation != Qt.Horizontal: return None return self.columnHeader(section) def index(self, row, column, parent): return self.createIndex(row, column, self.child_items[row]) def columnCount(self, parent=None): return len(self.column_headers) def columnHeader(self, column): return self.column_headers[column] def data(self, index, role): if role == Qt.BackgroundRole: child = self.child_items[index.row()] if child in self.highlight_set: return self.region_attributes[child.key].colour return None if role == Qt.ForegroundRole: child = self.child_items[index.row()] if child in self.highlight_set: return QColor(255, 255, 255) return self.region_attributes[child.key].colour if role != Qt.DisplayRole: return None 
hregion = self.child_items[index.row()] col = index.column() if col == 0: return hregion.pid if col == 1: return hregion.tid if col == 2: return hregion.comm return None def SetHighlight(self, row, set_highlight): child = self.child_items[row] top_left = self.createIndex(row, 0, child) bottom_right = self.createIndex(row, len(self.column_headers) - 1, child) self.dataChanged.emit(top_left, bottom_right) def Highlight(self, highlight_set): for row in xrange(self.child_count): child = self.child_items[row] if child in self.highlight_set: if child not in highlight_set: self.SetHighlight(row, False) elif child in highlight_set: self.SetHighlight(row, True) self.highlight_set = highlight_set # Switch graph legend is a table class SwitchGraphLegend(QWidget): def __init__(self, collection, region_attributes, parent=None): super(SwitchGraphLegend, self).__init__(parent) self.data_model = SwitchGraphLegendModel(collection, region_attributes) self.model = QSortFilterProxyModel() self.model.setSourceModel(self.data_model) self.view = QTableView() self.view.setModel(self.model) self.view.setEditTriggers(QAbstractItemView.NoEditTriggers) self.view.verticalHeader().setVisible(False) self.view.sortByColumn(-1, Qt.AscendingOrder) self.view.setSortingEnabled(True) self.view.resizeColumnsToContents() self.view.resizeRowsToContents() self.vbox = VBoxLayout(self.view) self.setLayout(self.vbox) sz1 = self.view.columnWidth(0) + self.view.columnWidth(1) + self.view.columnWidth(2) + 2 sz1 = sz1 + self.view.verticalScrollBar().sizeHint().width() self.saved_size = sz1 def resizeEvent(self, event): self.saved_size = self.size().width() super(SwitchGraphLegend, self).resizeEvent(event) def Highlight(self, highlight_set): self.data_model.Highlight(highlight_set) self.update() def changeEvent(self, event): if event.type() == QEvent.FontChange: self.view.resizeRowsToContents() self.view.resizeColumnsToContents() # Need to resize rows again after column resize self.view.resizeRowsToContents() super(SwitchGraphLegend, self).changeEvent(event) # Random colour generation def RGBColourTooLight(r, g, b): if g > 230: return True if g <= 160: return False if r <= 180 and g <= 180: return False if r < 60: return False return True def GenerateColours(x): cs = [0] for i in xrange(1, x): cs.append(int((255.0 / i) + 0.5)) colours = [] for r in cs: for g in cs: for b in cs: # Exclude black and colours that look too light against a white background if (r, g, b) == (0, 0, 0) or RGBColourTooLight(r, g, b): continue colours.append(QColor(r, g, b)) return colours def GenerateNColours(n): for x in xrange(2, n + 2): colours = GenerateColours(x) if len(colours) >= n: return colours return [] def GenerateNRandomColours(n, seed): colours = GenerateNColours(n) random.seed(seed) random.shuffle(colours) return colours # Graph attributes, in particular the scale and subrange that change when zooming class GraphAttributes(): def __init__(self, scale, subrange, region_attributes, dp): self.scale = scale self.subrange = subrange self.region_attributes = region_attributes # Rounding avoids errors due to finite floating point precision self.dp = dp # data decimal places self.Update() def XToPixel(self, x): return int(round((x - self.subrange.x.lo) * self.scale.x, self.pdp.x)) def YToPixel(self, y): return int(round((y - self.subrange.y.lo) * self.scale.y, self.pdp.y)) def PixelToXRounded(self, px): return round((round(px, 0) / self.scale.x), self.dp.x) + self.subrange.x.lo def PixelToYRounded(self, py): return round((round(py, 0) / self.scale.y), 
# Switch graph splitter which divides the CPU graphs from the legend

class SwitchGraphSplitter(QSplitter):

    def __init__(self, parent=None):
        super(SwitchGraphSplitter, self).__init__(parent)
        self.first_time = False

    def resizeEvent(self, ev):
        if self.first_time:
            self.first_time = False
            sz1 = self.widget(1).view.columnWidth(0) + self.widget(1).view.columnWidth(1) + self.widget(1).view.columnWidth(2) + 2
            sz1 = sz1 + self.widget(1).view.verticalScrollBar().sizeHint().width()
            sz0 = self.size().width() - self.handleWidth() - sz1
            self.setSizes([sz0, sz1])
        elif not (self.widget(1).saved_size is None):
            sz1 = self.widget(1).saved_size
            sz0 = self.size().width() - self.handleWidth() - sz1
            self.setSizes([sz0, sz1])
        super(SwitchGraphSplitter, self).resizeEvent(ev)

# Graph widget base class

class GraphWidget(QWidget):

    graph_title_changed = Signal(object)

    def __init__(self, parent=None):
        super(GraphWidget, self).__init__(parent)

    def GraphTitleChanged(self, title):
        self.graph_title_changed.emit(title)

    def Title(self):
        return ""

# Display time in s, ms, us or ns

def ToTimeStr(val):
    val = Decimal(val)
    if val >= 1000000000:
        return "{} s".format((val / 1000000000).quantize(Decimal("0.000000001")))
    if val >= 1000000:
        return "{} ms".format((val / 1000000).quantize(Decimal("0.000001")))
    if val >= 1000:
        return "{} us".format((val / 1000).quantize(Decimal("0.001")))
    return "{} ns".format(val.quantize(Decimal("1")))
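# Illustrative examples (editorial addition, not part of the original script)
# of ToTimeStr() output:
#   ToTimeStr(500)        -> "500 ns"
#   ToTimeStr(1500)       -> "1.500 us"
#   ToTimeStr(2500000)    -> "2.500000 ms"
#   ToTimeStr(3000000000) -> "3.000000000 s"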
# Switch (i.e. context switch i.e. Time Chart by CPU) graph widget which
# contains the CPU graphs and the legend and control buttons

class SwitchGraphWidget(GraphWidget):

    def __init__(self, glb, collection, parent=None):
        super(SwitchGraphWidget, self).__init__(parent)
        self.glb = glb
        self.collection = collection
        self.back_state = []
        self.forward_state = []
        self.selection_state = (None, None)
        self.fwd_rect = None
        self.start_time = self.glb.StartTime(collection.machine_id)
        i = 0
        hregions = collection.hregions.values()
        colours = GenerateNRandomColours(len(hregions), 1013)
        region_attributes = {}
        for hregion in hregions:
            if hregion.pid == 0 and hregion.tid == 0:
                region_attributes[hregion.key] = GraphRegionAttribute(QColor(0, 0, 0))
            else:
                region_attributes[hregion.key] = GraphRegionAttribute(colours[i])
                i = i + 1
        # Default to entire range
        xsubrange = Subrange(0.0, float(collection.xrangehi - collection.xrangelo) + 1.0)
        ysubrange = Subrange(0.0, float(collection.yrangehi - collection.yrangelo) + 1.0)
        subrange = XY(xsubrange, ysubrange)
        scale = self.GetScaleForRange(subrange)
        self.attrs = GraphAttributes(scale, subrange, region_attributes, collection.dp)
        self.item = VertcalGraphSetGraphicsItem(collection, self.attrs, self, SwitchGraphGraphicsItem)
        self.scene = QGraphicsScene()
        self.scene.addItem(self.item)
        self.view = QGraphicsView(self.scene)
        self.view.centerOn(0, 0)
        self.view.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        self.legend = SwitchGraphLegend(collection, region_attributes)
        self.splitter = SwitchGraphSplitter()
        self.splitter.addWidget(self.view)
        self.splitter.addWidget(self.legend)
        self.point_label = QLabel("")
        self.point_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
        self.back_button = QToolButton()
        self.back_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))
        self.back_button.setDisabled(True)
        self.back_button.released.connect(lambda: self.Back())
        self.forward_button = QToolButton()
        self.forward_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowRight))
        self.forward_button.setDisabled(True)
        self.forward_button.released.connect(lambda: self.Forward())
        self.zoom_button = QToolButton()
        self.zoom_button.setText("Zoom")
        self.zoom_button.setDisabled(True)
        self.zoom_button.released.connect(lambda: self.Zoom())
        self.hbox = HBoxLayout(self.back_button, self.forward_button, self.zoom_button, self.point_label)
        self.vbox = VBoxLayout(self.splitter, self.hbox)
        self.setLayout(self.vbox)

    def GetScaleForRangeX(self, xsubrange):
        # Default graph 1000 pixels wide
        dflt = 1000.0
        r = xsubrange.hi - xsubrange.lo
        return dflt / r

    def GetScaleForRangeY(self, ysubrange):
        # Default graph 50 pixels high
        dflt = 50.0
        r = ysubrange.hi - ysubrange.lo
        return dflt / r

    def GetScaleForRange(self, subrange):
        # Default graph 1000 pixels wide, 50 pixels high
        xscale = self.GetScaleForRangeX(subrange.x)
        yscale = self.GetScaleForRangeY(subrange.y)
        return XY(xscale, yscale)

    def PointEvent(self, cpu, time_from, time_to, hregions):
        text = "CPU: " + str(cpu)
        time_from = time_from.quantize(Decimal(1))
        rel_time_from = time_from - self.glb.StartTime(self.collection.machine_id)
        text = text + " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ")"
        self.point_label.setText(text)
        self.legend.Highlight(hregions)

    def RightClickEvent(self, cpu, hregion_times, pos):
        if not IsSelectable(self.glb.db, "calls", "WHERE parent_id >= 0"):
            return
        menu = QMenu(self.view)
        for hregion, time in hregion_times:
            thread_at_time = (hregion.exec_comm_id, hregion.thread_id, time)
            menu_text = "Show Call Tree for {} {}:{} at {}".format(hregion.comm, hregion.pid, hregion.tid, time)
            menu.addAction(CreateAction(menu_text, "Show Call Tree", lambda a=None, args=thread_at_time: self.RightClickSelect(args), self.view))
        menu.exec_(pos)

    def RightClickSelect(self, args):
        CallTreeWindow(self.glb, self.glb.mainwindow, thread_at_time=args)

    def NoPointEvent(self):
        self.point_label.setText("")
        self.legend.Highlight({})

    def RangeEvent(self, time_from, time_to):
        time_from = time_from.quantize(Decimal(1))
        time_to = time_to.quantize(Decimal(1))
        if time_to <= time_from:
            self.point_label.setText("")
            return
        rel_time_from = time_from - self.start_time
        rel_time_to = time_to - self.start_time
        text = " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ") to: " + str(time_to) + " (+" + ToTimeStr(rel_time_to) + ")"
        text = text + " duration: " + ToTimeStr(time_to - time_from)
        self.point_label.setText(text)

    def BackState(self):
        return (self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect)

    def PushBackState(self):
        state = copy.deepcopy(self.BackState())
        self.back_state.append(state)
        self.back_button.setEnabled(True)

    def PopBackState(self):
        self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.back_state.pop()
        self.attrs.Update()
        if not self.back_state:
            self.back_button.setDisabled(True)

    def PushForwardState(self):
        state = copy.deepcopy(self.BackState())
        self.forward_state.append(state)
        self.forward_button.setEnabled(True)

    def PopForwardState(self):
        self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.forward_state.pop()
        self.attrs.Update()
        if not self.forward_state:
            self.forward_button.setDisabled(True)

    def Title(self):
        time_from = self.collection.xrangelo + Decimal(self.attrs.subrange.x.lo)
        time_to = self.collection.xrangelo + Decimal(self.attrs.subrange.x.hi)
        rel_time_from = time_from - self.start_time
        rel_time_to = time_to - self.start_time
        title = "+" + ToTimeStr(rel_time_from) + " to +" + ToTimeStr(rel_time_to)
        title = title + " (" + ToTimeStr(time_to - time_from) + ")"
        return title

    def Update(self):
        selected_subrange, selection_state = self.selection_state
        self.item.SetSelection(selection_state)
        self.item.SetBracket(self.fwd_rect)
        self.zoom_button.setDisabled(selected_subrange is None)
        self.GraphTitleChanged(self.Title())
        self.item.update(self.item.boundingRect())

    def Back(self):
        if not self.back_state:
            return
        self.PushForwardState()
        self.PopBackState()
        self.Update()

    def Forward(self):
        if not self.forward_state:
            return
        self.PushBackState()
        self.PopForwardState()
        self.Update()

    def SelectEvent(self, x0, x1, selection_state):
        if selection_state is None:
            selected_subrange = None
        else:
            if x1 - x0 < 1.0:
                x1 += 1.0
            selected_subrange = Subrange(x0, x1)
        self.selection_state = (selected_subrange, selection_state)
        self.zoom_button.setDisabled(selected_subrange is None)

    def Zoom(self):
        selected_subrange, selection_state = self.selection_state
        if selected_subrange is None:
            return
        self.fwd_rect = selection_state
        self.item.SetSelection(None)
        self.PushBackState()
        self.attrs.subrange.x = selected_subrange
        self.forward_state = []
        self.forward_button.setDisabled(True)
        self.selection_state = (None, None)
        self.fwd_rect = None
        self.attrs.scale.x = self.GetScaleForRangeX(self.attrs.subrange.x)
        self.attrs.Update()
        self.Update()
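# Editorial sketch (hypothetical class, not used by the viewer): the
# Back/Forward logic above is the classic two-stack history pattern, shown
# here in isolation:
class _HistoryPatternSketch(object):

    def __init__(self, state):
        self.state = state
        self.back_stack = []
        self.forward_stack = []

    def NewState(self, state):
        # A new zoom discards the forward history, exactly as Zoom() does above
        self.back_stack.append(self.state)
        self.forward_stack = []
        self.state = state

    def Back(self):
        if self.back_stack:
            self.forward_stack.append(self.state)
            self.state = self.back_stack.pop()

    def Forward(self):
        if self.forward_stack:
            self.back_stack.append(self.state)
            self.state = self.forward_stack.pop()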
# Slow initialization - perform non-GUI initialization in a separate thread
# and put up a modal message box while waiting

class SlowInitClass():

    def __init__(self, glb, title, init_fn):
        self.init_fn = init_fn
        self.done = False
        self.result = None
        self.msg_box = QMessageBox(glb.mainwindow)
        self.msg_box.setText("Initializing " + title + ". Please wait.")
        self.msg_box.setWindowTitle("Initializing " + title)
        self.msg_box.setWindowIcon(glb.mainwindow.style().standardIcon(QStyle.SP_MessageBoxInformation))
        self.init_thread = Thread(self.ThreadFn, glb)
        self.init_thread.done.connect(lambda: self.Done(), Qt.QueuedConnection)
        self.init_thread.start()

    def Done(self):
        self.msg_box.done(0)

    def ThreadFn(self, glb):
        conn_name = "SlowInitClass" + str(os.getpid())
        db, dbname = glb.dbref.Open(conn_name)
        self.result = self.init_fn(db)
        self.done = True
        return (True, 0)

    def Result(self):
        while not self.done:
            self.msg_box.exec_()
        self.init_thread.wait()
        return self.result

def SlowInit(glb, title, init_fn):
    init = SlowInitClass(glb, title, init_fn)
    return init.Result()

# Time chart by CPU window

class TimeChartByCPUWindow(QMdiSubWindow):

    def __init__(self, glb, parent=None):
        super(TimeChartByCPUWindow, self).__init__(parent)
        self.glb = glb
        self.machine_id = glb.HostMachineId()
        self.collection_name = "SwitchGraphDataCollection " + str(self.machine_id)
        collection = LookupModel(self.collection_name)
        if collection is None:
            collection = SlowInit(glb, "Time Chart", self.Init)
        self.widget = SwitchGraphWidget(glb, collection, self)
        self.view = self.widget
        self.base_title = "Time Chart by CPU"
        self.setWindowTitle(self.base_title + self.widget.Title())
        self.widget.graph_title_changed.connect(self.GraphTitleChanged)
        self.setWidget(self.widget)
        AddSubWindow(glb.mainwindow.mdi_area, self, self.windowTitle())

    def Init(self, db):
        return LookupCreateModel(self.collection_name, lambda: SwitchGraphDataCollection(self.glb, db, self.machine_id))

    def GraphTitleChanged(self, title):
        self.setWindowTitle(self.base_title + " : " + title)

# Child data item finder

class ChildDataItemFinder():

    def __init__(self, root):
        self.root = root
        self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
        self.rows = []
        self.pos = 0

    def FindSelect(self):
        self.rows = []
        if self.pattern:
            pattern = re.compile(self.value)
            for child in self.root.child_items:
                for column_data in child.data:
                    if re.search(pattern, str(column_data)) is not None:
                        self.rows.append(child.row)
                        break
        else:
            for child in self.root.child_items:
                for column_data in child.data:
                    if self.value in str(column_data):
                        self.rows.append(child.row)
                        break

    def FindValue(self):
        self.pos = 0
        if self.last_value != self.value or self.pattern != self.last_pattern:
            self.FindSelect()
        if not len(self.rows):
            return -1
        return self.rows[self.pos]

    def FindThread(self):
        if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
            row = self.FindValue()
        elif len(self.rows):
            if self.direction > 0:
                self.pos += 1
                if self.pos >= len(self.rows):
                    self.pos = 0
            else:
                self.pos -= 1
                if self.pos < 0:
                    self.pos = len(self.rows) - 1
            row = self.rows[self.pos]
        else:
            row = -1
        return (True, row)

    def Find(self, value, direction, pattern, context, callback):
        self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction, pattern, self.value, self.pattern)
        # Use a thread so the UI is not blocked
        thread = Thread(self.FindThread)
        thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
        thread.start()

    def FindDone(self, thread, callback, row):
        callback(row)

# Number of database records to fetch in one go
glb_chunk_sz = 10000

# Size of pickled integer big enough for glb_chunk_sz (assumed value; glb_nsz
# is used by the SQL fetcher below but its definition was missing here)
glb_nsz = 8
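# Editorial note (not part of the original script): SQLFetcherProcess below
# pages with a "$$last_id$$" placeholder rather than OFFSET, i.e. keyset
# pagination. With a hypothetical query:
#   sql = "SELECT id, time FROM samples WHERE id > $$last_id$$ ORDER BY id LIMIT 10000"
#   stmt = sql.replace("$$last_id$$", str(last_id))
# each batch resumes directly after the last id already fetched, so the
# database never has to re-scan rows the way a growing OFFSET would.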
# Background process for SQL data fetcher

class SQLFetcherProcess():

    def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
        # Need a unique connection name
        conn_name = "SQLFetcher" + str(os.getpid())
        self.db, dbname = dbref.Open(conn_name)
        self.sql = sql
        self.buffer = buffer
        self.head = head
        self.tail = tail
        self.fetch_count = fetch_count
        self.fetching_done = fetching_done
        self.process_target = process_target
        self.wait_event = wait_event
        self.fetched_event = fetched_event
        self.prep = prep
        self.query = QSqlQuery(self.db)
        self.query_limit = 0 if "$$last_id$$" in sql else 2
        self.last_id = -1
        self.fetched = 0
        self.more = True
        self.local_head = self.head.value
        self.local_tail = self.tail.value

    def Select(self):
        if self.query_limit:
            if self.query_limit == 1:
                return
            self.query_limit -= 1
        stmt = self.sql.replace("$$last_id$$", str(self.last_id))
        QueryExec(self.query, stmt)

    def Next(self):
        if not self.query.next():
            self.Select()
            if not self.query.next():
                return None
        self.last_id = self.query.value(0)
        return self.prep(self.query)

    def WaitForTarget(self):
        while True:
            self.wait_event.clear()
            target = self.process_target.value
            if target > self.fetched or target < 0:
                break
            self.wait_event.wait()
        return target

    def HasSpace(self, sz):
        if self.local_tail <= self.local_head:
            space = len(self.buffer) - self.local_head
            if space > sz:
                return True
            if space >= glb_nsz:
                # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
                nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
                self.buffer[self.local_head : self.local_head + len(nd)] = nd
            self.local_head = 0
        if self.local_tail - self.local_head > sz:
            return True
        return False

    def WaitForSpace(self, sz):
        if self.HasSpace(sz):
            return
        while True:
            self.wait_event.clear()
            self.local_tail = self.tail.value
            if self.HasSpace(sz):
                return
            self.wait_event.wait()

    def AddToBuffer(self, obj):
        d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        n = len(d)
        nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
        sz = n + glb_nsz
        self.WaitForSpace(sz)
        pos = self.local_head
        self.buffer[pos : pos + len(nd)] = nd
        self.buffer[pos + glb_nsz : pos + sz] = d
        self.local_head += sz

    def FetchBatch(self, batch_size):
        fetched = 0
        while batch_size > fetched:
            obj = self.Next()
            if obj is None:
                self.more = False
                break
            self.AddToBuffer(obj)
            fetched += 1
        if fetched:
            self.fetched += fetched
            with self.fetch_count.get_lock():
                self.fetch_count.value += fetched
            self.head.value = self.local_head
            self.fetched_event.set()

    def Run(self):
        while self.more:
            target = self.WaitForTarget()
            if target < 0:
                break
            batch_size = min(glb_chunk_sz, target - self.fetched)
            self.FetchBatch(batch_size)
        self.fetching_done.value = True
        self.fetched_event.set()

def SQLFetcherFn(*x):
    process = SQLFetcherProcess(*x)
    process.Run()

# SQL data fetcher

class SQLFetcher(QObject):

    done = Signal(object)

    def __init__(self, glb, sql, prep, process_data, parent=None):
        super(SQLFetcher, self).__init__(parent)
        self.process_data = process_data
        self.more = True
        self.target = 0
        self.last_target = 0
        self.fetched = 0
        self.buffer_size = 16 * 1024 * 1024
        self.buffer = Array(c_char, self.buffer_size, lock=False)
        self.head = Value(c_longlong)
        self.tail = Value(c_longlong)
        self.local_tail = 0
        self.fetch_count = Value(c_longlong)
        self.fetching_done = Value(c_bool)
        self.last_count = 0
        self.process_target = Value(c_longlong)
        self.wait_event = Event()
        self.fetched_event = Event()
        glb.AddInstanceToShutdownOnExit(self)
        self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
        self.process.start()
        self.thread = Thread(self.Thread)
        self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
        self.thread.start()

    def Shutdown(self):
        # Tell the thread and process to exit
        self.process_target.value = -1
        self.wait_event.set()
        self.more = False
        self.fetching_done.value = True
        self.fetched_event.set()

    def Thread(self):
        if not self.more:
            return True, 0
        while True:
            self.fetched_event.clear()
            fetch_count = self.fetch_count.value
            if fetch_count != self.last_count:
                break
            if self.fetching_done.value:
                self.more = False
                return True, 0
            self.fetched_event.wait()
        count = fetch_count - self.last_count
        self.last_count = fetch_count
        self.fetched += count
        return False, count

    def Fetch(self, nr):
        if not self.more:
            # -1 indicates there are no more
            return -1
        result = self.fetched
        extra = result + nr - self.target
        if extra > 0:
            self.target += extra
            # process_target < 0 indicates shutting down
            if self.process_target.value >= 0:
                self.process_target.value = self.target
            self.wait_event.set()
        return result

    def RemoveFromBuffer(self):
        pos = self.local_tail
        if len(self.buffer) - pos < glb_nsz:
            pos = 0
        n = pickle.loads(self.buffer[pos : pos + glb_nsz])
        if n == 0:
            pos = 0
            n = pickle.loads(self.buffer[0 : glb_nsz])
        pos += glb_nsz
        obj = pickle.loads(self.buffer[pos : pos + n])
        self.local_tail = pos + n
        return obj

    def ProcessData(self, count):
        for i in xrange(count):
            obj = self.RemoveFromBuffer()
            self.process_data(obj)
        self.tail.value = self.local_tail
        self.wait_event.set()
        self.done.emit(count)
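# Editorial sketch (hypothetical helper, not used by the viewer): records
# cross the process boundary above as length-prefixed pickles in a shared
# ring buffer. The framing written by AddToBuffer(), in isolation:
def _frame_sketch(obj, buffer, pos):
    d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
    nd = pickle.dumps(len(d), pickle.HIGHEST_PROTOCOL)
    buffer[pos : pos + len(nd)] = nd                      # length prefix
    buffer[pos + glb_nsz : pos + glb_nsz + len(d)] = d    # pickled payload
    return pos + glb_nsz + len(d)                         # next write position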
# Fetch more records bar

class FetchMoreRecordsBar():

    def __init__(self, model, parent):
        self.model = model
        self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
        self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.fetch_count = QSpinBox()
        self.fetch_count.setRange(1, 1000000)
        self.fetch_count.setValue(10)
        self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.fetch = QPushButton("Go!")
        self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.fetch.released.connect(self.FetchMoreRecords)
        self.progress = QProgressBar()
        self.progress.setRange(0, 100)
        self.progress.hide()
        self.done_label = QLabel("All records fetched")
        self.done_label.hide()
        self.spacer = QLabel("")
        self.close_button = QToolButton()
        self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
        self.close_button.released.connect(self.Deactivate)
        self.hbox = QHBoxLayout()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.addWidget(self.label)
        self.hbox.addWidget(self.fetch_count)
        self.hbox.addWidget(self.fetch)
        self.hbox.addWidget(self.spacer)
        self.hbox.addWidget(self.progress)
        self.hbox.addWidget(self.done_label)
        self.hbox.addWidget(self.close_button)
        self.bar = QWidget()
        self.bar.setLayout(self.hbox)
        self.bar.show()
        self.in_progress = False
        self.model.progress.connect(self.Progress)
        self.done = False
        if not model.HasMoreRecords():
            self.Done()

    def Widget(self):
        return self.bar

    def Activate(self):
        self.bar.show()
        self.fetch.setFocus()

    def Deactivate(self):
        self.bar.hide()

    def Enable(self, enable):
        self.fetch.setEnabled(enable)
        self.fetch_count.setEnabled(enable)

    def Busy(self):
        self.Enable(False)
        self.fetch.hide()
        self.spacer.hide()
        self.progress.show()

    def Idle(self):
        self.in_progress = False
        self.Enable(True)
        self.progress.hide()
        self.fetch.show()
        self.spacer.show()

    def Target(self):
        return self.fetch_count.value() * glb_chunk_sz

    def Done(self):
        self.done = True
        self.Idle()
        self.label.hide()
        self.fetch_count.hide()
        self.fetch.hide()
        self.spacer.hide()
        self.done_label.show()

    def Progress(self, count):
        if self.in_progress:
            if count:
                percent = ((count - self.start) * 100) / self.Target()
                if percent >= 100:
                    self.Idle()
                else:
                    self.progress.setValue(percent)
        if not count:
            # Count value of zero means no more records
            self.Done()

    def FetchMoreRecords(self):
        if self.done:
            return
        self.progress.setValue(0)
        self.Busy()
        self.in_progress = True
        self.start = self.model.FetchMoreRecords(self.Target())

# Branch data model level two item

class BranchLevelTwoItem():

    def __init__(self, row, col, text, parent_item):
        self.row = row
        self.parent_item = parent_item
        self.data = [""] * (col + 1)
        self.data[col] = text
        self.level = 2

    def getParentItem(self):
        return self.parent_item

    def getRow(self):
        return self.row

    def childCount(self):
        return 0

    def hasChildren(self):
        return False

    def getData(self, column):
        return self.data[column]

# Branch data model level one item

class BranchLevelOneItem():

    def __init__(self, glb, row, data, parent_item):
        self.glb = glb
        self.row = row
        self.parent_item = parent_item
        self.child_count = 0
        self.child_items = []
        self.data = data[1:]
        self.dbid = data[0]
        self.level = 1
        self.query_done = False
        self.br_col = len(self.data) - 1

    def getChildItem(self, row):
        return self.child_items[row]

    def getParentItem(self):
        return self.parent_item

    def getRow(self):
        return self.row

    def Select(self):
        self.query_done = True
        if not self.glb.have_disassembler:
            return
        query = QSqlQuery(self.glb.db)
        QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
                         " FROM samples"
                         " INNER JOIN dsos ON samples.to_dso_id = dsos.id"
                         " INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
                         " WHERE samples.id = " + str(self.dbid))
        if not query.next():
            return
        cpu = query.value(0)
        dso = query.value(1)
        sym = query.value(2)
        if dso == 0 or sym == 0:
            return
        off = query.value(3)
        short_name = query.value(4)
        long_name = query.value(5)
        build_id = query.value(6)
        sym_start = query.value(7)
        ip = query.value(8)
        QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
                         " FROM samples"
                         " INNER JOIN symbols ON samples.symbol_id = symbols.id"
                         " WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
                         " ORDER BY samples.id"
                         " LIMIT 1")
        if not query.next():
            return
        if query.value(0) != dso:
            # Cannot disassemble from one dso to another
            return
        bsym = query.value(1)
        boff = query.value(2)
        bsym_start = query.value(3)
        if bsym == 0:
            return
        tot = bsym_start + boff + 1 - sym_start - off
        if tot <= 0 or tot > 16384:
            return
        inst = self.glb.disassembler.Instruction()
        f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
        if not f:
            return
        mode = 0 if Is64Bit(f) else 1
        self.glb.disassembler.SetMode(inst, mode)
        buf_sz = tot + 16
        buf = create_string_buffer(tot + 16)
        f.seek(sym_start + off)
        buf.value = f.read(buf_sz)
        buf_ptr = addressof(buf)
        i = 0
        while tot > 0:
            cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
            if cnt:
                byte_str = tohex(ip).rjust(16)
                for k in xrange(cnt):
                    byte_str += " %02x" % ord(buf[i])
                    i += 1
                while k < 15:
                    byte_str += "   "
                    k += 1
                self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
                self.child_count += 1
            else:
                return
            buf_ptr += cnt
            tot -= cnt
            buf_sz -= cnt
            ip += cnt

    def childCount(self):
        if not self.query_done:
            self.Select()
            if not self.child_count:
                return -1
        return self.child_count

    def hasChildren(self):
        if not self.query_done:
            return True
        return self.child_count > 0

    def getData(self, column):
        return self.data[column]

# Branch data model root item

class BranchRootItem():

    def __init__(self):
        self.child_count = 0
        self.child_items = []
        self.level = 0

    def getChildItem(self, row):
        return self.child_items[row]

    def getParentItem(self):
        return None

    def getRow(self):
        return 0

    def childCount(self):
        return self.child_count

    def hasChildren(self):
        return self.child_count > 0

    def getData(self, column):
        return ""

# Calculate instructions per cycle

def CalcIPC(cyc_cnt, insn_cnt):
    if cyc_cnt and insn_cnt:
        ipc = Decimal(float(insn_cnt) / cyc_cnt)
        ipc = str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
    else:
        ipc = "0"
    return ipc

# Branch data preparation

def BranchDataPrepBr(query, data):
    data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
                " (" + dsoname(query.value(11)) + ")" + " -> " +
                tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
                " (" + dsoname(query.value(15)) + ")")

def BranchDataPrepIPC(query, data):
    insn_cnt = query.value(16)
    cyc_cnt = query.value(17)
    ipc = CalcIPC(cyc_cnt, insn_cnt)
    data.append(insn_cnt)
    data.append(cyc_cnt)
    data.append(ipc)

def BranchDataPrep(query):
    data = []
    for i in xrange(0, 8):
        data.append(query.value(i))
    BranchDataPrepBr(query, data)
    return data

def BranchDataPrepWA(query):
    data = []
    data.append(query.value(0))
    # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
    data.append("{:>19}".format(query.value(1)))
    for i in xrange(2, 8):
        data.append(query.value(i))
    BranchDataPrepBr(query, data)
    return data

def BranchDataWithIPCPrep(query):
    data = []
    for i in xrange(0, 8):
        data.append(query.value(i))
    BranchDataPrepIPC(query, data)
    BranchDataPrepBr(query, data)
    return data
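# Editorial note (not part of the original script): the "{:>19}".format(...)
# in BranchDataPrepWA above and BranchDataWithIPCPrepWA below right-justifies
# the time in a fixed width, so the string still sorts like a number:
#   "{:>19}".format(1631234567890123456) -> "1631234567890123456"
#   "{:>19}".format(98765)               -> "              98765"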
def BranchDataWithIPCPrepWA(query):
    data = []
    data.append(query.value(0))
    # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
    data.append("{:>19}".format(query.value(1)))
    for i in xrange(2, 8):
        data.append(query.value(i))
    BranchDataPrepIPC(query, data)
    BranchDataPrepBr(query, data)
    return data

# Branch data model

class BranchModel(TreeModel):

    progress = Signal(object)

    def __init__(self, glb, event_id, where_clause, parent=None):
        super(BranchModel, self).__init__(glb, None, parent)
        self.event_id = event_id
        self.more = True
        self.populated = 0
        self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
        if self.have_ipc:
            select_ipc = ", insn_count, cyc_count"
            prep_fn = BranchDataWithIPCPrep
            prep_wa_fn = BranchDataWithIPCPrepWA
        else:
            select_ipc = ""
            prep_fn = BranchDataPrep
            prep_wa_fn = BranchDataPrepWA
        sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
               " CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
               " ip, symbols.name, sym_offset, dsos.short_name,"
               " to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
               + select_ipc +
               " FROM samples"
               " INNER JOIN comms ON comm_id = comms.id"
               " INNER JOIN threads ON thread_id = threads.id"
               " INNER JOIN branch_types ON branch_type = branch_types.id"
               " INNER JOIN symbols ON symbol_id = symbols.id"
               " INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
               " INNER JOIN dsos ON samples.dso_id = dsos.id"
               " INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
               " WHERE samples.id > $$last_id$$" + where_clause +
               " AND evsel_id = " + str(self.event_id) +
               " ORDER BY samples.id"
               " LIMIT " + str(glb_chunk_sz))
        # The workaround applies when both PySide version 1 and Python 3 are in use
        if pyside_version_1 and sys.version_info[0] == 3:
            prep = prep_wa_fn
        else:
            prep = prep_fn
        self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
        self.fetcher.done.connect(self.Update)
        self.fetcher.Fetch(glb_chunk_sz)

    def GetRoot(self):
        return BranchRootItem()

    def columnCount(self, parent=None):
        if self.have_ipc:
            return 11
        else:
            return 8

    def columnHeader(self, column):
        if self.have_ipc:
            return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column]
        else:
            return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]

    def columnFont(self, column):
        if self.have_ipc:
            br_col = 10
        else:
            br_col = 7
        if column != br_col:
            return None
        return QFont("Monospace")

    def DisplayData(self, item, index):
        if item.level == 1:
            self.FetchIfNeeded(item.row)
        return item.getData(index.column())

    def AddSample(self, data):
        child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
        self.root.child_items.append(child)
        self.populated += 1

    def Update(self, fetched):
        if not fetched:
            self.more = False
            self.progress.emit(0)
        child_count = self.root.child_count
        count = self.populated - child_count
        if count > 0:
            parent = QModelIndex()
            self.beginInsertRows(parent, child_count, child_count + count - 1)
            self.insertRows(child_count, count, parent)
            self.root.child_count += count
            self.endInsertRows()
            self.progress.emit(self.root.child_count)

    def FetchMoreRecords(self, count):
        current = self.root.child_count
        if self.more:
            self.fetcher.Fetch(count)
        else:
            self.progress.emit(0)
        return current

    def HasMoreRecords(self):
        return self.more

# Report Variables

class ReportVars():

    def __init__(self, name = "", where_clause = "", limit = ""):
        self.name = name
        self.where_clause = where_clause
        self.limit = limit

    def UniqueId(self):
        return str(self.where_clause + ";" + self.limit)

# Branch window

class BranchWindow(QMdiSubWindow):

    def __init__(self, glb, event_id, report_vars, parent=None):
        super(BranchWindow, self).__init__(parent)
        model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
        self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
        self.view = QTreeView()
        self.view.setUniformRowHeights(True)
        self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
        self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
        self.view.setModel(self.model)
        self.ResizeColumnsToContents()
        self.context_menu = TreeContextMenu(self.view)
        self.find_bar = FindBar(self, self, True)
        self.finder = ChildDataItemFinder(self.model.root)
        self.fetch_bar = FetchMoreRecordsBar(self.model, self)
        self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
        self.setWidget(self.vbox.Widget())
        AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")

    def ResizeColumnToContents(self, column, n):
        # Using the view's resizeColumnToContents() here is extremely slow
        # so implement a crude alternative
        mm = "MM" if column else "MMMM"
        font = self.view.font()
        metrics = QFontMetrics(font)
        max = 0
        for row in xrange(n):
            val = self.model.root.child_items[row].data[column]
            len = metrics.width(str(val) + mm)
            max = len if len > max else max
        val = self.model.columnHeader(column)
        len = metrics.width(str(val) + mm)
        max = len if len > max else max
        self.view.setColumnWidth(column, max)

    def ResizeColumnsToContents(self):
        n = min(self.model.root.child_count, 100)
        if n < 1:
            # No data yet, so connect a signal to notify when there is
            self.model.rowsInserted.connect(self.UpdateColumnWidths)
            return
        columns = self.model.columnCount()
        for i in xrange(columns):
            self.ResizeColumnToContents(i, n)

    def UpdateColumnWidths(self, *x):
        # This only needs to be done once, so disconnect the signal now
        self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
        self.ResizeColumnsToContents()

    def Find(self, value, direction, pattern, context):
        self.view.setFocus()
        self.find_bar.Busy()
        self.finder.Find(value, direction, pattern, context, self.FindDone)

    def FindDone(self, row):
        self.find_bar.Idle()
        if row >= 0:
            self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
        else:
            self.find_bar.NotFound()

# Line edit data item

class LineEditDataItem(object):

    def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
        self.glb = glb
        self.label = label
        self.placeholder_text = placeholder_text
        self.parent = parent
        self.id = id
        self.value = default
        self.widget = QLineEdit(default)
        self.widget.editingFinished.connect(self.Validate)
        self.widget.textChanged.connect(self.Invalidate)
        self.red = False
        self.error = ""
        self.validated = True
        if placeholder_text:
            self.widget.setPlaceholderText(placeholder_text)

    def TurnTextRed(self):
        if not self.red:
            palette = QPalette()
            palette.setColor(QPalette.Text, Qt.red)
            self.widget.setPalette(palette)
            self.red = True

    def TurnTextNormal(self):
        if self.red:
            palette = QPalette()
            self.widget.setPalette(palette)
            self.red = False

    def InvalidValue(self, value):
        self.value = ""
        self.TurnTextRed()
        self.error = self.label + " invalid value '" + value + "'"
        self.parent.ShowMessage(self.error)

    def Invalidate(self):
        self.validated = False

    def DoValidate(self, input_string):
        self.value = input_string.strip()

    def Validate(self):
        self.validated = True
        self.error = ""
        self.TurnTextNormal()
        self.parent.ClearMessage()
        input_string = self.widget.text()
        if not len(input_string.strip()):
            self.value = ""
            return
        self.DoValidate(input_string)

    def IsValid(self):
        if not self.validated:
            self.Validate()
        if len(self.error):
            self.parent.ShowMessage(self.error)
            return False
        return True

    def IsNumber(self, value):
        try:
            x = int(value)
        except:
            x = 0
        return str(x) == value

# Non-negative integer ranges dialog data item

class NonNegativeIntegerRangesDataItem(LineEditDataItem):

    def __init__(self, glb, label, placeholder_text, column_name, parent):
        super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
        self.column_name = column_name

    def DoValidate(self, input_string):
        singles = []
        ranges = []
        for value in [x.strip() for x in input_string.split(",")]:
            if "-" in value:
                vrange = value.split("-")
                if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
                    return self.InvalidValue(value)
                ranges.append(vrange)
            else:
                if not self.IsNumber(value):
                    return self.InvalidValue(value)
                singles.append(value)
        ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
        if len(singles):
            ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
        self.value = " OR ".join(ranges)
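# Editorial example (not part of the original script): for column_name "cpu",
# NonNegativeIntegerRangesDataItem above turns the input "0,5-6" into:
#   (cpu >= 5 AND cpu <= 6) OR cpu IN (0)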
# Positive integer dialog data item

class PositiveIntegerDataItem(LineEditDataItem):

    def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
        super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)

    def DoValidate(self, input_string):
        if not self.IsNumber(input_string.strip()):
            return self.InvalidValue(input_string)
        value = int(input_string.strip())
        if value <= 0:
            return self.InvalidValue(input_string)
        self.value = str(value)

# Dialog data item converted and validated using a SQL table

class SQLTableDataItem(LineEditDataItem):

    def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
        super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
        self.table_name = table_name
        self.match_column = match_column
        self.column_name1 = column_name1
        self.column_name2 = column_name2

    def ValueToIds(self, value):
        ids = []
        query = QSqlQuery(self.glb.db)
        stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
        ret = query.exec_(stmt)
        if ret:
            while query.next():
                ids.append(str(query.value(0)))
        return ids

    def DoValidate(self, input_string):
        all_ids = []
        for value in [x.strip() for x in input_string.split(",")]:
            ids = self.ValueToIds(value)
            if len(ids):
                all_ids.extend(ids)
            else:
                return self.InvalidValue(value)
        self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
        if self.column_name2:
            self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"

# Sample time ranges dialog data item converted and validated using 'samples' SQL table

class SampleTimeRangesDataItem(LineEditDataItem):

    def __init__(self, glb, label, placeholder_text, column_name, parent):
        self.column_name = column_name
        self.last_id = 0
        self.first_time = 0
        self.last_time = 2 ** 64
        query = QSqlQuery(glb.db)
        QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
        if query.next():
            self.last_id = int(query.value(0))
        self.first_time = int(glb.HostStartTime())
        self.last_time = int(glb.HostFinishTime())
        if placeholder_text:
            placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
        super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)

    def IdBetween(self, query, lower_id, higher_id, order):
        QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
        if query.next():
            return True, int(query.value(0))
        else:
            return False, 0

    def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
        query = QSqlQuery(self.glb.db)
        while True:
            next_id = int((lower_id + higher_id) / 2)
            QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
            if not query.next():
                ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
                if not ok:
                    ok, dbid = self.IdBetween(query, next_id, higher_id, "")
                    if not ok:
                        return str(higher_id)
                next_id = dbid
                QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
            next_time = int(query.value(0))
            if get_floor:
                if target_time > next_time:
                    lower_id = next_id
                else:
                    higher_id = next_id
                if higher_id <= lower_id + 1:
                    return str(higher_id)
            else:
                if target_time >= next_time:
                    lower_id = next_id
                else:
                    higher_id = next_id
                if higher_id <= lower_id + 1:
                    return str(lower_id)

    def ConvertRelativeTime(self, val):
        mult = 1
        suffix = val[-2:]
        if suffix == "ms":
            mult = 1000000
        elif suffix == "us":
            mult = 1000
        elif suffix == "ns":
            mult = 1
        else:
            return val
        val = val[:-2].strip()
        if not self.IsNumber(val):
            return val
        val = int(val) * mult
        if val >= 0:
            val += self.first_time
        else:
            val += self.last_time
        return str(val)

    def ConvertTimeRange(self, vrange):
        if vrange[0] == "":
            vrange[0] = str(self.first_time)
        if vrange[1] == "":
            vrange[1] = str(self.last_time)
        vrange[0] = self.ConvertRelativeTime(vrange[0])
        vrange[1] = self.ConvertRelativeTime(vrange[1])
        if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
            return False
        beg_range = max(int(vrange[0]), self.first_time)
        end_range = min(int(vrange[1]), self.last_time)
        if beg_range > self.last_time or end_range < self.first_time:
            return False
        vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
        vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
        return True

    def AddTimeRange(self, value, ranges):
        n = value.count("-")
        if n == 1:
            pass
        elif n == 2:
            if value.split("-")[1].strip() == "":
                n = 1
        elif n == 3:
            n = 2
        else:
            return False
        pos = findnth(value, "-", n)
        vrange = [value[:pos].strip(), value[pos+1:].strip()]
        if self.ConvertTimeRange(vrange):
            ranges.append(vrange)
            return True
        return False

    def DoValidate(self, input_string):
        ranges = []
        for value in [x.strip() for x in input_string.split(",")]:
            if not self.AddTimeRange(value, ranges):
                return self.InvalidValue(value)
        ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
        self.value = " OR ".join(ranges)
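# Editorial examples (not part of the original script) of the relative time
# syntax handled by ConvertRelativeTime() above: a non-negative value is
# relative to the first sample time, a negative one to the last:
#   "1ms"  -> str(first_time + 1000000)
#   "-1ms" -> str(last_time - 1000000)
# Values without an ms/us/ns suffix pass through as absolute nanoseconds.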
# Report Dialog Base

class ReportDialogBase(QDialog):

    def __init__(self, glb, title, items, partial, parent=None):
        super(ReportDialogBase, self).__init__(parent)
        self.glb = glb
        self.report_vars = ReportVars()
        self.setWindowTitle(title)
        self.setMinimumWidth(600)
        self.data_items = [x(glb, self) for x in items]
        self.partial = partial
        self.grid = QGridLayout()
        for row in xrange(len(self.data_items)):
            self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
            self.grid.addWidget(self.data_items[row].widget, row, 1)
        self.status = QLabel()
        self.ok_button = QPushButton("Ok", self)
        self.ok_button.setDefault(True)
        self.ok_button.released.connect(self.Ok)
        self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.cancel_button = QPushButton("Cancel", self)
        self.cancel_button.released.connect(self.reject)
        self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.hbox = QHBoxLayout()
        #self.hbox.addStretch()
        self.hbox.addWidget(self.status)
        self.hbox.addWidget(self.ok_button)
        self.hbox.addWidget(self.cancel_button)
        self.vbox = QVBoxLayout()
        self.vbox.addLayout(self.grid)
        self.vbox.addLayout(self.hbox)
        self.setLayout(self.vbox)

    def Ok(self):
        vars = self.report_vars
        for d in self.data_items:
            if d.id == "REPORTNAME":
                vars.name = d.value
        if not vars.name:
            self.ShowMessage("Report name is required")
            return
        for d in self.data_items:
            if not d.IsValid():
                return
        for d in self.data_items[1:]:
            if d.id == "LIMIT":
                vars.limit = d.value
            elif len(d.value):
                if len(vars.where_clause):
                    vars.where_clause += " AND "
                vars.where_clause += d.value
        if len(vars.where_clause):
            if self.partial:
                vars.where_clause = " AND ( " + vars.where_clause + " ) "
            else:
                vars.where_clause = " WHERE " + vars.where_clause + " "
        self.accept()

    def ShowMessage(self, msg):
        self.status.setText("<font color=#FF0000>" + msg)

    def ClearMessage(self):
        self.status.setText("")

# Selected branch report creation dialog

class SelectedBranchDialog(ReportDialogBase):

    def __init__(self, glb, parent=None):
        title = "Selected Branches"
        items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
                 lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
                 lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
                 lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
                 lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
                 lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
                 lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
                 lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
                 lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
        super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)

# Event list

def GetEventList(db):
    events = []
    query = QSqlQuery(db)
    QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
    while query.next():
        events.append(query.value(0))
    return events

# Is a table selectable

def IsSelectable(db, table, sql = "", columns = "*"):
    query = QSqlQuery(db)
    try:
        QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1")
    except:
        return False
    return True

# SQL table data model item

class SQLTableItem():

    def __init__(self, row, data):
        self.row = row
        self.data = data

    def getData(self, column):
        return self.data[column]
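# Editorial note (not part of the original script): IsSelectable() above
# probes the schema by attempting a LIMIT 1 query. For example, BranchModel
# uses
#   IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
# to detect whether the database is new enough to contain IPC information.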
# SQL table data model

class SQLTableModel(TableModel):

    progress = Signal(object)

    def __init__(self, glb, sql, column_headers, parent=None):
        super(SQLTableModel, self).__init__(parent)
        self.glb = glb
        self.more = True
        self.populated = 0
        self.column_headers = column_headers
        self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
        self.fetcher.done.connect(self.Update)
        self.fetcher.Fetch(glb_chunk_sz)

    def DisplayData(self, item, index):
        self.FetchIfNeeded(item.row)
        return item.getData(index.column())

    def AddSample(self, data):
        child = SQLTableItem(self.populated, data)
        self.child_items.append(child)
        self.populated += 1

    def Update(self, fetched):
        if not fetched:
            self.more = False
            self.progress.emit(0)
        child_count = self.child_count
        count = self.populated - child_count
        if count > 0:
            parent = QModelIndex()
            self.beginInsertRows(parent, child_count, child_count + count - 1)
            self.insertRows(child_count, count, parent)
            self.child_count += count
            self.endInsertRows()
            self.progress.emit(self.child_count)

    def FetchMoreRecords(self, count):
        current = self.child_count
        if self.more:
            self.fetcher.Fetch(count)
        else:
            self.progress.emit(0)
        return current

    def HasMoreRecords(self):
        return self.more

    def columnCount(self, parent=None):
        return len(self.column_headers)

    def columnHeader(self, column):
        return self.column_headers[column]

    def SQLTableDataPrep(self, query, count):
        data = []
        for i in xrange(count):
            data.append(query.value(i))
        return data

# SQL automatic table data model

class SQLAutoTableModel(SQLTableModel):

    def __init__(self, glb, table_name, parent=None):
        sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
        if table_name == "comm_threads_view":
            # For now, comm_threads_view has no id column
            sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
        column_headers = []
        query = QSqlQuery(glb.db)
        if glb.dbref.is_sqlite3:
            QueryExec(query, "PRAGMA table_info(" + table_name + ")")
            while query.next():
                column_headers.append(query.value(1))
            if table_name == "sqlite_master":
                sql = "SELECT * FROM " + table_name
        else:
            if table_name[:19] == "information_schema.":
                sql = "SELECT * FROM " + table_name
                select_table_name = table_name[19:]
                schema = "information_schema"
            else:
                select_table_name = table_name
                schema = "public"
            QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
            while query.next():
                column_headers.append(query.value(0))
        if pyside_version_1 and sys.version_info[0] == 3:
            if table_name == "samples_view":
                self.SQLTableDataPrep = self.samples_view_DataPrep
            if table_name == "samples":
                self.SQLTableDataPrep = self.samples_DataPrep
        super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)

    def samples_view_DataPrep(self, query, count):
        data = []
        data.append(query.value(0))
        # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
        data.append("{:>19}".format(query.value(1)))
        for i in xrange(2, count):
            data.append(query.value(i))
        return data

    def samples_DataPrep(self, query, count):
        data = []
        for i in xrange(9):
            data.append(query.value(i))
        # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
        data.append("{:>19}".format(query.value(9)))
        for i in xrange(10, count):
            data.append(query.value(i))
        return data

# Base class for custom ResizeColumnsToContents

class ResizeColumnsToContentsBase(QObject):

    def __init__(self, parent=None):
        super(ResizeColumnsToContentsBase, self).__init__(parent)

    def ResizeColumnToContents(self, column, n):
        # Using the view's resizeColumnToContents() here is extremely slow
        # so implement a crude alternative
        font = self.view.font()
        metrics = QFontMetrics(font)
        max = 0
        for row in xrange(n):
            val = self.data_model.child_items[row].data[column]
            len = metrics.width(str(val) + "MM")
            max = len if len > max else max
        val = self.data_model.columnHeader(column)
        len = metrics.width(str(val) + "MM")
        max = len if len > max else max
        self.view.setColumnWidth(column, max)

    def ResizeColumnsToContents(self):
        n = min(self.data_model.child_count, 100)
        if n < 1:
            # No data yet, so connect a signal to notify when there is
            self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
            return
        columns = self.data_model.columnCount()
        for i in xrange(columns):
            self.ResizeColumnToContents(i, n)

    def UpdateColumnWidths(self, *x):
        # This only needs to be done once, so disconnect the signal now
        self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
        self.ResizeColumnsToContents()

# Convert value to CSV

def ToCSValue(val):
    if '"' in val:
        val = val.replace('"', '""')
    if "," in val or '"' in val:
        val = '"' + val + '"'
    return val

# Key to sort table model indexes by row / column, assuming fewer than 1000 columns

glb_max_cols = 1000

def RowColumnKey(a):
    return a.row() * glb_max_cols + a.column()

# Copy selected table cells to clipboard

def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
    indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
    idx_cnt = len(indexes)
    if not idx_cnt:
        return
    if idx_cnt == 1:
        with_hdr = False
    min_row = indexes[0].row()
    max_row = indexes[0].row()
    min_col = indexes[0].column()
    max_col = indexes[0].column()
    for i in indexes:
        min_row = min(min_row, i.row())
        max_row = max(max_row, i.row())
        min_col = min(min_col, i.column())
        max_col = max(max_col, i.column())
    if max_col > glb_max_cols:
        raise RuntimeError("glb_max_cols is too low")
    max_width = [0] * (1 + max_col - min_col)
    for i in indexes:
        c = i.column() - min_col
        max_width[c] = max(max_width[c], len(str(i.data())))
    text = ""
    pad = ""
    sep = ""
    if with_hdr:
        model = indexes[0].model()
        for col in range(min_col, max_col + 1):
            val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
            if as_csv:
                text += sep + ToCSValue(val)
                sep = ","
            else:
                c = col - min_col
                max_width[c] = max(max_width[c], len(val))
                width = max_width[c]
                align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
                if align & Qt.AlignRight:
                    val = val.rjust(width)
                text += pad + sep + val
                pad = " " * (width - len(val))
                sep = " "
        text += "\n"
        pad = ""
        sep = ""
    last_row = min_row
    for i in indexes:
        if i.row() > last_row:
            last_row = i.row()
            text += "\n"
            pad = ""
            sep = ""
        if as_csv:
            text += sep + ToCSValue(str(i.data()))
            sep = ","
        else:
            width = max_width[i.column() - min_col]
            if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
                val = str(i.data()).rjust(width)
            else:
                val = str(i.data())
            text += pad + sep + val
            pad = " " * (width - len(val))
            sep = " "
    QApplication.clipboard().setText(text)
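# Editorial examples (not part of the original script) of the CSV escaping
# performed by ToCSValue() above:
#   ToCSValue('plain')    -> 'plain'
#   ToCSValue('a,b')      -> '"a,b"'
#   ToCSValue('say "hi"') -> '"say ""hi"""'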
def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
    indexes = view.selectedIndexes()
    if not len(indexes):
        return
    selection = view.selectionModel()
    first = None
    for i in indexes:
        above = view.indexAbove(i)
        if not selection.isSelected(above):
            first = i
            break
    if first is None:
        raise RuntimeError("CopyTreeCellsToClipboard internal error")
    model = first.model()
    row_cnt = 0
    col_cnt = model.columnCount(first)
    max_width = [0] * col_cnt
    indent_sz = 2
    indent_str = " " * indent_sz
    expanded_mark_sz = 2
    if sys.version_info[0] == 3:
        expanded_mark = "\u25BC "
        not_expanded_mark = "\u25B6 "
    else:
        expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
        not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
    leaf_mark = "  "
    if not as_csv:
        pos = first
        while True:
            row_cnt += 1
            row = pos.row()
            for c in range(col_cnt):
                i = pos.sibling(row, c)
                if c:
                    n = len(str(i.data()))
                else:
                    n = len(str(i.data()).strip())
                    n += (i.internalPointer().level - 1) * indent_sz
                    n += expanded_mark_sz
                max_width[c] = max(max_width[c], n)
            pos = view.indexBelow(pos)
            if not selection.isSelected(pos):
                break
    text = ""
    pad = ""
    sep = ""
    if with_hdr:
        for c in range(col_cnt):
            val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
            if as_csv:
                text += sep + ToCSValue(val)
                sep = ","
            else:
                max_width[c] = max(max_width[c], len(val))
                width = max_width[c]
                align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
                if align & Qt.AlignRight:
                    val = val.rjust(width)
                text += pad + sep + val
                pad = " " * (width - len(val))
                sep = " "
        text += "\n"
        pad = ""
        sep = ""
    pos = first
    while True:
        row = pos.row()
        for c in range(col_cnt):
            i = pos.sibling(row, c)
            val = str(i.data())
            if not c:
                if model.hasChildren(i):
                    if view.isExpanded(i):
                        mark = expanded_mark
                    else:
                        mark = not_expanded_mark
                else:
                    mark = leaf_mark
                val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
            if as_csv:
                text += sep + ToCSValue(val)
                sep = ","
            else:
                width = max_width[c]
                if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
                    val = val.rjust(width)
                text += pad + sep + val
                pad = " " * (width - len(val))
                sep = " "
        pos = view.indexBelow(pos)
        if not selection.isSelected(pos):
            break
        text = text.rstrip() + "\n"
        pad = ""
        sep = ""
    QApplication.clipboard().setText(text)

def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
    view.CopyCellsToClipboard(view, as_csv, with_hdr)

def CopyCellsToClipboardHdr(view):
    CopyCellsToClipboard(view, False, True)

def CopyCellsToClipboardCSV(view):
    CopyCellsToClipboard(view, True, True)

# Context menu

class ContextMenu(object):

    def __init__(self, view):
        self.view = view
        self.view.setContextMenuPolicy(Qt.CustomContextMenu)
        self.view.customContextMenuRequested.connect(self.ShowContextMenu)

    def ShowContextMenu(self, pos):
        menu = QMenu(self.view)
        self.AddActions(menu)
        menu.exec_(self.view.mapToGlobal(pos))

    def AddCopy(self, menu):
        menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
        menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))

    def AddActions(self, menu):
        self.AddCopy(menu)

class TreeContextMenu(ContextMenu):

    def __init__(self, view):
        super(TreeContextMenu, self).__init__(view)

    def AddActions(self, menu):
        i = self.view.currentIndex()
        text = str(i.data()).strip()
        if len(text):
            menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
        self.AddCopy(menu)

# Table window

class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):

    def __init__(self, glb, table_name, parent=None):
        super(TableWindow, self).__init__(parent)
        self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
        self.model = QSortFilterProxyModel()
        self.model.setSourceModel(self.data_model)
        self.view = QTableView()
        self.view.setModel(self.model)
        self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.view.verticalHeader().setVisible(False)
        self.view.sortByColumn(-1, Qt.AscendingOrder)
        self.view.setSortingEnabled(True)
        self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
        self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
        self.ResizeColumnsToContents()
        self.context_menu = ContextMenu(self.view)
        self.find_bar = FindBar(self, self, True)
        self.finder = ChildDataItemFinder(self.data_model)
        self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
        self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
        self.setWidget(self.vbox.Widget())
        AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")

    def Find(self, value, direction, pattern, context):
        self.view.setFocus()
        self.find_bar.Busy()
        self.finder.Find(value, direction, pattern, context, self.FindDone)

    def FindDone(self, row):
        self.find_bar.Idle()
        if row >= 0:
            self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
        else:
            self.find_bar.NotFound()

# Table list

def GetTableList(glb):
    tables = []
    query = QSqlQuery(glb.db)
    if glb.dbref.is_sqlite3:
        QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name")
    else:
        QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name")
    while query.next():
        tables.append(query.value(0))
    if glb.dbref.is_sqlite3:
        tables.append("sqlite_master")
    else:
        tables.append("information_schema.tables")
        tables.append("information_schema.views")
        tables.append("information_schema.columns")
    return tables

# Top Calls data model

class TopCallsModel(SQLTableModel):

    def __init__(self, glb, report_vars, parent=None):
        text = ""
        if not glb.dbref.is_sqlite3:
            text = "::text"
        limit = ""
        if len(report_vars.limit):
            limit = " LIMIT " + report_vars.limit
        sql = ("SELECT comm, pid, tid, name,"
               " CASE"
               " WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
               " ELSE short_name"
               " END AS dso,"
               " call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
               " CASE"
               " WHEN (calls.flags = 1) THEN 'no call'" + text +
               " WHEN (calls.flags = 2) THEN 'no return'" + text +
               " WHEN (calls.flags = 3) THEN 'no call/return'" + text +
               " ELSE ''" + text +
               " END AS flags"
               " FROM calls"
               " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
               " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
               " INNER JOIN dsos ON symbols.dso_id = dsos.id"
               " INNER JOIN comms ON calls.comm_id = comms.id"
               " INNER JOIN threads ON calls.thread_id = threads.id" +
               report_vars.where_clause +
               " ORDER BY elapsed_time DESC" +
               limit)
        column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
        self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
        super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)

    def columnAlignment(self, column):
        return self.alignment[column]

# Top Calls report creation dialog

class TopCallsDialog(ReportDialogBase):

    def __init__(self, glb, parent=None):
        title = "Top Calls by Elapsed Time"
        items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
                 lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
                 lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
                 lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
                 lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
                 lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
                 lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
                 lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
        super(TopCallsDialog, self).__init__(glb, title, items, False, parent)

# Top Calls window

class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):

    def __init__(self, glb, report_vars, parent=None):
        super(TopCallsWindow, self).__init__(parent)
        self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
        self.model = self.data_model
        self.view = QTableView()
        self.view.setModel(self.model)
        self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.view.verticalHeader().setVisible(False)
        self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
        self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
        self.context_menu = ContextMenu(self.view)
        self.ResizeColumnsToContents()
        self.find_bar = FindBar(self, self, True)
        self.finder = ChildDataItemFinder(self.model)
        self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
        self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
        self.setWidget(self.vbox.Widget())
        AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)

    def Find(self, value, direction, pattern, context):
        self.view.setFocus()
        self.find_bar.Busy()
        self.finder.Find(value, direction, pattern, context, self.FindDone)

    def FindDone(self, row):
        self.find_bar.Idle()
        if row >= 0:
            self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
        else:
            self.find_bar.NotFound()

# Action Definition

def CreateAction(label, tip, callback, parent=None, shortcut=None):
    action = QAction(label, parent)
    if shortcut != None:
        action.setShortcuts(shortcut)
    action.setStatusTip(tip)
    action.triggered.connect(callback)
    return action

# Typical application actions

def CreateExitAction(app, parent=None):
    return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)

# Typical MDI actions

def CreateCloseActiveWindowAction(mdi_area):
    return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)

def CreateCloseAllWindowsAction(mdi_area):
    return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)

def CreateTileWindowsAction(mdi_area):
    return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)

def CreateCascadeWindowsAction(mdi_area):
    return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)

def CreateNextWindowAction(mdi_area):
    return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)

def CreatePreviousWindowAction(mdi_area):
    return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)

# Typical MDI window menu

class WindowMenu():

    def __init__(self, mdi_area, menu):
        self.mdi_area = mdi_area
        self.window_menu = menu.addMenu("&Windows")
        self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
        self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
        self.tile_windows = CreateTileWindowsAction(mdi_area)
        self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
        self.next_window = CreateNextWindowAction(mdi_area)
        self.previous_window = CreatePreviousWindowAction(mdi_area)
        self.window_menu.aboutToShow.connect(self.Update)

    def Update(self):
        self.window_menu.clear()
        sub_window_count = len(self.mdi_area.subWindowList())
        have_sub_windows = sub_window_count != 0
        self.close_active_window.setEnabled(have_sub_windows)
        self.close_all_windows.setEnabled(have_sub_windows)
        self.tile_windows.setEnabled(have_sub_windows)
        self.cascade_windows.setEnabled(have_sub_windows)
        self.next_window.setEnabled(have_sub_windows)
        self.previous_window.setEnabled(have_sub_windows)
        self.window_menu.addAction(self.close_active_window)
        self.window_menu.addAction(self.close_all_windows)
        self.window_menu.addSeparator()
        self.window_menu.addAction(self.tile_windows)
        self.window_menu.addAction(self.cascade_windows)
        self.window_menu.addSeparator()
        self.window_menu.addAction(self.next_window)
        self.window_menu.addAction(self.previous_window)
        if sub_window_count == 0:
            return
        self.window_menu.addSeparator()
        nr = 1
        for sub_window in self.mdi_area.subWindowList():
            label = str(nr) + " " + sub_window.name
            if nr < 10:
                label = "&" + label
            action = self.window_menu.addAction(label)
            action.setCheckable(True)
            action.setChecked(sub_window == self.mdi_area.activeSubWindow())
            action.triggered.connect(lambda a=None, x=nr: self.setActiveSubWindow(x))
            self.window_menu.addAction(action)
            nr += 1

    def setActiveSubWindow(self, nr):
        self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
Expanding a couple of levels of the tree and adjusting column widths to suit will display something like:
<pre>
                                         Call Graph: pt_example
Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count  Branch Count(%)
v- ls
    v- 2638:2638
        v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
          |- unknown               unknown       1        13198     0.1              1              0.0
          >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
          >- _d_linit_internal     ld-2.19.so    1       448152     4.4          11094              5.3
          v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
             >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
             >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
             >- __libc_csu_init    ls            1        10354     0.1             10              0.0
             |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
             v- main               ls            1      8182043    99.6         180254             99.9
</pre>
<h3>Points to note:</h3>
<ul>
<li>The top level is a command name (comm)</li>
<li>The next level is a thread (pid:tid)</li>
<li>Subsequent levels are functions</li>
<li>'Count' is the number of calls</li>
<li>'Time' is the elapsed time until the function returns</li>
<li>Percentages are relative to the level above</li>
<li>'Branch Count' is the total number of branches for that function and all functions that it calls
</ul>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
<h2 id=calltree>1.2 Call Tree</h2>
The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
Also the 'Count' column, which would be always 1, is replaced by the 'Call Time'.
<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
Open a branch to display disassembly. This only works if:
<ol>
<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
One exception is kcore where the DSO long name is used (refer to dsos_view on the Tables menu),
or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
</ol>
<h4 id=xed>Intel XED Setup</h4>
To use Intel XED, libxed.so must be present. To build and install libxed.so:
<pre>
git clone https://github.com/intelxed/mbuild.git mbuild
git clone https://github.com/intelxed/xed
cd xed
./mfile.py --share
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Instructions per Cycle (IPC)</h3>
If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch.
Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
since the previous displayed 'IPC'.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced by various selection criteria.
A dialog box displays available criteria which are AND'ed together.
<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
        81073085947329-81073085958238         From 81073085947329 to 81073085958238
        100us-200us                           From 100us to 200us
        10ms-                                 From 10ms to the end
        -100ns                                The first 100ns
        -10ms-                                The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed
between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria
which are AND'ed together. If not all data is fetched, a Fetch bar is provided.
Ctrl-F displays a Find bar.
<h1 id=charts>2. Charts</h1>
<h2 id=timechartbycpu>2.1 Time chart by CPU</h2>
This chart displays context switch information when that data is available.
Refer to context_switches_view on the Tables menu.
<h3>Features</h3>
<ol>
<li>Mouse over to highlight the task and show the time</li>
<li>Drag the mouse to select a region and zoom by pushing the Zoom button</li>
<li>Go back and forward by pressing the arrow buttons</li>
<li>If call information is available, right-click to show a call tree opened to that task and time.
Note, the call tree may take some time to appear, and there may not be call information for the task or time selected.
</li>
</ol>
<h3>Important</h3>
The graph can be misleading in the following respects:
<ol>
<li>The graph shows the first task on each CPU as running from the beginning of the time range.
Because tracing might start on different CPUs at different times, that is not necessarily the case.
Refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
<li>Similarly, the last task on each CPU can be shown as running longer than it really was.
Again, refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
<li>When the mouse is over a task, the highlighted task might not be visible on the legend without scrolling if the legend does not fit fully in the window</li>
</ol>
<h1 id=tables>3. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
but that can be slow for large tables.
<p>There are also tables of database meta-information.
For SQLite3 databases, the sqlite_master table is included.
For PostgreSQL databases, information_schema.tables/views/columns are included.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
will go to the next/previous result in id order, instead of display order.
""" # Help window class HelpWindow(QMdiSubWindow): def __init__(self, glb, parent=None): super(HelpWindow, self).__init__(parent) self.text = QTextBrowser() self.text.setHtml(glb_help_text) self.text.setReadOnly(True) self.text.setOpenExternalLinks(True) self.setWidget(self.text) AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help") # Main window that only displays the help text class HelpOnlyWindow(QMainWindow): def __init__(self, parent=None): super(HelpOnlyWindow, self).__init__(parent) self.setMinimumSize(200, 100) self.resize(800, 600) self.setWindowTitle("Exported SQL Viewer Help") self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation)) self.text = QTextBrowser() self.text.setHtml(glb_help_text) self.text.setReadOnly(True) self.text.setOpenExternalLinks(True) self.setCentralWidget(self.text) # PostqreSQL server version def PostqreSQLServerVersion(db): query = QSqlQuery(db) QueryExec(query, "SELECT VERSION()") if query.next(): v_str = query.value(0) v_list = v_str.strip().split(" ") if v_list[0] == "PostgreSQL" and v_list[2] == "on": return v_list[1] return v_str return "Unknown" # SQLite version def SQLiteVersion(db): query = QSqlQuery(db) QueryExec(query, "SELECT sqlite_version()") if query.next(): return query.value(0) return "Unknown" # About dialog class AboutDialog(QDialog): def __init__(self, glb, parent=None): super(AboutDialog, self).__init__(parent) self.setWindowTitle("About Exported SQL Viewer") self.setMinimumWidth(300) pyside_version = "1" if pyside_version_1 else "2" text = "<pre>" text += "Python version: " + sys.version.split(" ")[0] + "\n" text += "PySide version: " + pyside_version + "\n" text += "Qt version: " + qVersion() + "\n" if glb.dbref.is_sqlite3: text += "SQLite version: " + SQLiteVersion(glb.db) + "\n" else: text += "PostqreSQL version: " + PostqreSQLServerVersion(glb.db) + "\n" text += "</pre>" self.text = QTextBrowser() self.text.setHtml(text) self.text.setReadOnly(True) self.text.setOpenExternalLinks(True) self.vbox = QVBoxLayout() self.vbox.addWidget(self.text) self.setLayout(self.vbox) # Font resize def ResizeFont(widget, diff): font = widget.font() sz = font.pointSize() font.setPointSize(sz + diff) widget.setFont(font) def ShrinkFont(widget): ResizeFont(widget, -1) def EnlargeFont(widget): ResizeFont(widget, 1) # Unique name for sub-windows def NumberedWindowName(name, nr): if nr > 1: name += " <" + str(nr) + ">" return name def UniqueSubWindowName(mdi_area, name): nr = 1 while True: unique_name = NumberedWindowName(name, nr) ok = True for sub_window in mdi_area.subWindowList(): if sub_window.name == unique_name: ok = False break if ok: return unique_name nr += 1 # Add a sub-window def AddSubWindow(mdi_area, sub_window, name): unique_name = UniqueSubWindowName(mdi_area, name) sub_window.setMinimumSize(200, 100) sub_window.resize(800, 600) sub_window.setWindowTitle(unique_name) sub_window.setAttribute(Qt.WA_DeleteOnClose) sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon)) sub_window.name = unique_name mdi_area.addSubWindow(sub_window) sub_window.show() # Main window class MainWindow(QMainWindow): def __init__(self, glb, parent=None): super(MainWindow, self).__init__(parent) self.glb = glb self.setWindowTitle("Exported SQL Viewer: " + glb.dbname) self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon)) self.setMinimumSize(200, 100) self.mdi_area = QMdiArea() self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded) 
self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded) self.setCentralWidget(self.mdi_area) menu = self.menuBar() file_menu = menu.addMenu("&File") file_menu.addAction(CreateExitAction(glb.app, self)) edit_menu = menu.addMenu("&Edit") edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy)) edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self)) edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find)) edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)])) edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")])) edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")])) reports_menu = menu.addMenu("&Reports") if IsSelectable(glb.db, "calls"): reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self)) if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"): reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self)) self.EventMenu(GetEventList(glb.db), reports_menu) if IsSelectable(glb.db, "calls"): reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self)) if IsSelectable(glb.db, "context_switches"): charts_menu = menu.addMenu("&Charts") charts_menu.addAction(CreateAction("&Time chart by CPU", "Create a new window displaying time charts by CPU", self.TimeChartByCPU, self)) self.TableMenu(GetTableList(glb), menu) self.window_menu = WindowMenu(self.mdi_area, menu) help_menu = menu.addMenu("&Help") help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents)) help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self)) def Try(self, fn): win = self.mdi_area.activeSubWindow() if win: try: fn(win.view) except: pass def CopyToClipboard(self): self.Try(CopyCellsToClipboardHdr) def CopyToClipboardCSV(self): self.Try(CopyCellsToClipboardCSV) def Find(self): win = self.mdi_area.activeSubWindow() if win: try: win.find_bar.Activate() except: pass def FetchMoreRecords(self): win = self.mdi_area.activeSubWindow() if win: try: win.fetch_bar.Activate() except: pass def ShrinkFont(self): self.Try(ShrinkFont) def EnlargeFont(self): self.Try(EnlargeFont) def EventMenu(self, events, reports_menu): branches_events = 0 for event in events: event = event.split(":")[0] if event == "branches": branches_events += 1 dbid = 0 for event in events: dbid += 1 event = event.split(":")[0] if event == "branches": label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")" reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self)) label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")" reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self)) def TimeChartByCPU(self): TimeChartByCPUWindow(self.glb, self) def TableMenu(self, tables, menu): table_menu = 
menu.addMenu("&Tables") for table in tables: table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self)) def NewCallGraph(self): CallGraphWindow(self.glb, self) def NewCallTree(self): CallTreeWindow(self.glb, self) def NewTopCalls(self): dialog = TopCallsDialog(self.glb, self) ret = dialog.exec_() if ret: TopCallsWindow(self.glb, dialog.report_vars, self) def NewBranchView(self, event_id): BranchWindow(self.glb, event_id, ReportVars(), self) def NewSelectedBranchView(self, event_id): dialog = SelectedBranchDialog(self.glb, self) ret = dialog.exec_() if ret: BranchWindow(self.glb, event_id, dialog.report_vars, self) def NewTableView(self, table_name): TableWindow(self.glb, table_name, self) def Help(self): HelpWindow(self.glb, self) def About(self): dialog = AboutDialog(self.glb, self) dialog.exec_() def TryOpen(file_name): try: return open(file_name, "rb") except: return None def Is64Bit(f): result = sizeof(c_void_p) # ELF support only pos = f.tell() f.seek(0) header = f.read(7) f.seek(pos) magic = header[0:4] if sys.version_info[0] == 2: eclass = ord(header[4]) encoding = ord(header[5]) version = ord(header[6]) else: eclass = header[4] encoding = header[5] version = header[6] if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: result = True if eclass == 2 else False return result # Global data class Glb(): def __init__(self, dbref, db, dbname): self.dbref = dbref self.db = db self.dbname = dbname self.home_dir = os.path.expanduser("~") self.buildid_dir = os.getenv("PERF_BUILDID_DIR") if self.buildid_dir: self.buildid_dir += "/.build-id/" else: self.buildid_dir = self.home_dir + "/.debug/.build-id/" self.app = None self.mainwindow = None self.instances_to_shutdown_on_exit = weakref.WeakSet() try: self.disassembler = LibXED() self.have_disassembler = True except: self.have_disassembler = False self.host_machine_id = 0 self.host_start_time = 0 self.host_finish_time = 0 def FileFromBuildId(self, build_id): file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf" return TryOpen(file_name) def FileFromNamesAndBuildId(self, short_name, long_name, build_id): # Assume current machine i.e. 
no support for virtualization if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore": file_name = os.getenv("PERF_KCORE") f = TryOpen(file_name) if file_name else None if f: return f # For now, no special handling if long_name is /proc/kcore f = TryOpen(long_name) if f: return f f = self.FileFromBuildId(build_id) if f: return f return None def AddInstanceToShutdownOnExit(self, instance): self.instances_to_shutdown_on_exit.add(instance) # Shutdown any background processes or threads def ShutdownInstances(self): for x in self.instances_to_shutdown_on_exit: try: x.Shutdown() except: pass def GetHostMachineId(self): query = QSqlQuery(self.db) QueryExec(query, "SELECT id FROM machines WHERE pid = -1") if query.next(): self.host_machine_id = query.value(0) else: self.host_machine_id = 0 return self.host_machine_id def HostMachineId(self): if self.host_machine_id: return self.host_machine_id return self.GetHostMachineId() def SelectValue(self, sql): query = QSqlQuery(self.db) try: QueryExec(query, sql) except: return None if query.next(): return Decimal(query.value(0)) return None def SwitchesMinTime(self, machine_id): return self.SelectValue("SELECT time" " FROM context_switches" " WHERE time != 0 AND machine_id = " + str(machine_id) + " ORDER BY id LIMIT 1") def SwitchesMaxTime(self, machine_id): return self.SelectValue("SELECT time" " FROM context_switches" " WHERE time != 0 AND machine_id = " + str(machine_id) + " ORDER BY id DESC LIMIT 1") def SamplesMinTime(self, machine_id): return self.SelectValue("SELECT time" " FROM samples" " WHERE time != 0 AND machine_id = " + str(machine_id) + " ORDER BY id LIMIT 1") def SamplesMaxTime(self, machine_id): return self.SelectValue("SELECT time" " FROM samples" " WHERE time != 0 AND machine_id = " + str(machine_id) + " ORDER BY id DESC LIMIT 1") def CallsMinTime(self, machine_id): return self.SelectValue("SELECT calls.call_time" " FROM calls" " INNER JOIN threads ON threads.thread_id = calls.thread_id" " WHERE calls.call_time != 0 AND threads.machine_id = " + str(machine_id) + " ORDER BY calls.id LIMIT 1") def CallsMaxTime(self, machine_id): return self.SelectValue("SELECT calls.return_time" " FROM calls" " INNER JOIN threads ON threads.thread_id = calls.thread_id" " WHERE calls.return_time != 0 AND threads.machine_id = " + str(machine_id) + " ORDER BY calls.return_time DESC LIMIT 1") def GetStartTime(self, machine_id): t0 = self.SwitchesMinTime(machine_id) t1 = self.SamplesMinTime(machine_id) t2 = self.CallsMinTime(machine_id) if t0 is None or (not(t1 is None) and t1 < t0): t0 = t1 if t0 is None or (not(t2 is None) and t2 < t0): t0 = t2 return t0 def GetFinishTime(self, machine_id): t0 = self.SwitchesMaxTime(machine_id) t1 = self.SamplesMaxTime(machine_id) t2 = self.CallsMaxTime(machine_id) if t0 is None or (not(t1 is None) and t1 > t0): t0 = t1 if t0 is None or (not(t2 is None) and t2 > t0): t0 = t2 return t0 def HostStartTime(self): if self.host_start_time: return self.host_start_time self.host_start_time = self.GetStartTime(self.HostMachineId()) return self.host_start_time def HostFinishTime(self): if self.host_finish_time: return self.host_finish_time self.host_finish_time = self.GetFinishTime(self.HostMachineId()) return self.host_finish_time def StartTime(self, machine_id): if machine_id == self.HostMachineId(): return self.HostStartTime() return self.GetStartTime(machine_id) def FinishTime(self, machine_id): if machine_id == self.HostMachineId(): return self.HostFinishTime() return self.GetFinishTime(machine_id) # Database 
reference class DBRef(): def __init__(self, is_sqlite3, dbname): self.is_sqlite3 = is_sqlite3 self.dbname = dbname self.TRUE = "TRUE" self.FALSE = "FALSE" # SQLite prior to version 3.23 does not support TRUE and FALSE if self.is_sqlite3: self.TRUE = "1" self.FALSE = "0" def Open(self, connection_name): dbname = self.dbname if self.is_sqlite3: db = QSqlDatabase.addDatabase("QSQLITE", connection_name) else: db = QSqlDatabase.addDatabase("QPSQL", connection_name) opts = dbname.split() for opt in opts: if "=" in opt: opt = opt.split("=") if opt[0] == "hostname": db.setHostName(opt[1]) elif opt[0] == "port": db.setPort(int(opt[1])) elif opt[0] == "username": db.setUserName(opt[1]) elif opt[0] == "password": db.setPassword(opt[1]) elif opt[0] == "dbname": dbname = opt[1] else: dbname = opt db.setDatabaseName(dbname) if not db.open(): raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) return db, dbname # Main def Main(): usage_str = "exported-sql-viewer.py [--pyside-version-1] <database name>\n" \ " or: exported-sql-viewer.py --help-only" ap = argparse.ArgumentParser(usage = usage_str, add_help = False) ap.add_argument("--pyside-version-1", action='store_true') ap.add_argument("dbname", nargs="?") ap.add_argument("--help-only", action='store_true') args = ap.parse_args() if args.help_only: app = QApplication(sys.argv) mainwindow = HelpOnlyWindow() mainwindow.show() err = app.exec_() sys.exit(err) dbname = args.dbname if dbname is None: ap.print_usage() print("Too few arguments") sys.exit(1) is_sqlite3 = False try: f = open(dbname, "rb") if f.read(15) == b'SQLite format 3': is_sqlite3 = True f.close() except: pass dbref = DBRef(is_sqlite3, dbname) db, dbname = dbref.Open("main") glb = Glb(dbref, db, dbname) app = QApplication(sys.argv) glb.app = app mainwindow = MainWindow(glb) glb.mainwindow = mainwindow mainwindow.show() err = app.exec_() glb.ShutdownInstances() db.close() sys.exit(err) if __name__ == "__main__": Main()
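# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): Main() above decides
# between the QSQLITE and QPSQL drivers by sniffing the first 15 bytes of the
# named file for the SQLite magic string. The same check can be done
# standalone with nothing but the standard library:

def is_sqlite3_file(path):
    # SQLite database files start with the header b'SQLite format 3\x00';
    # matching the first 15 bytes, exactly as Main() does, is sufficient.
    try:
        with open(path, "rb") as f:
            return f.read(15) == b'SQLite format 3'
    except (IOError, OSError):
        return False

# e.g. is_sqlite3_file("pt_example") is True for a database created by
# export-to-sqlite.py, and False for a PostgreSQL connection string.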
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/exported-sql-viewer.py
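# ---------------------------------------------------------------------------
# Illustrative sketch for the exported-sql-viewer.py entry above (not part of
# the perf sources): the viewer reads databases written by export-to-sqlite.py
# (see later in this document), so the same data can also be inspected without
# Qt, using only the standard-library sqlite3 module. "pt_example" is a
# placeholder database name.

import sqlite3

def dump_first_samples(dbname="pt_example", limit=10):
    con = sqlite3.connect(dbname)
    try:
        # samples_view is created by export-to-sqlite.py; these columns are
        # taken from its definition.
        cur = con.execute("SELECT id, time, cpu, command, symbol"
                          " FROM samples_view ORDER BY id LIMIT ?", (limit,))
        for row in cur:
            print(row)
    finally:
        con.close()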
# Display a process of packets and processed time. # SPDX-License-Identifier: GPL-2.0 # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. from __future__ import print_function import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * from functools import cmp_to_key all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print("%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), diff_msec(hunk['xmit_t'], hunk['free_t']))) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print("%d.%06dsec cpu=%d" % (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)) for i in range(len(irq_list)): print(PF_IRQ_ENTRY % (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name'])) print(PF_JOINT) irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print(PF_NET_RX % (diff_msec(base_t, irq_event['time']), irq_event['skbaddr'])) print(PF_JOINT) print(PF_SOFT_ENTRY % diff_msec(base_t, 
hunk['sirq_ent_t'])) print(PF_JOINT) event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print(PF_NAPI_POLL % (diff_msec(base_t, event['event_t']), event['dev'])) if i == len(event_list) - 1: print("") else: print(PF_JOINT) else: print(PF_NET_RECV % (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len'])) if 'comm' in event.keys(): print(PF_WJOINT) print(PF_CPY_DGRAM % (diff_msec(base_t, event['comm_t']), event['pid'], event['comm'])) elif 'handle' in event.keys(): print(PF_WJOINT) if event['handle'] == "kfree_skb": print(PF_KFREE_SKB % (diff_msec(base_t, event['comm_t']), event['location'])) elif event['handle'] == "consume_skb": print(PF_CONS_SKB % diff_msec(base_t, event['comm_t'])) print(PF_JOINT) def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(key=cmp_to_key(lambda a,b :a[EINFO_IDX_TIME] < b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print(" dev len Qdisc " " netdevice free") for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print("debug buffer status") print("----------------------------") print("xmit Qdisc:remain:%d overflow:%d" % (len(tx_queue_list), of_count_tx_queue_list)) print("xmit netdevice:remain:%d overflow:%d" % (len(tx_xmit_list), of_count_tx_xmit_list)) print("receive:remain:%d overflow:%d" % (len(rx_skb_list), of_count_rx_skb_list)) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec): if 
symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, callchain, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name, work=None, budget=None): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name, work, budget) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information related to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name, work, budget) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time, 'work':work, 'budget':budget} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
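# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the tx path above
# correlates the net_dev_queue, net_dev_xmit and kfree_skb/consume_skb events
# for one packet by its skb address, then prints the queue->xmit and
# xmit->free latencies with diff_msec(). A minimal standalone rendition of
# that bookkeeping, using made-up event tuples:

def demo_tx_latency():
    # (skbaddr, event, time in ns) -- synthetic values for illustration only
    events = [(0xdead, 'queue', 1000000),
              (0xdead, 'xmit', 1750000),
              (0xdead, 'free', 4250000)]
    skbs = {}
    for addr, ev, t in events:
        skbs.setdefault(addr, {})[ev + '_t'] = t
    hunk = skbs[0xdead]
    # diff_msec() above computes (dst - src) / 1000000.0
    print("Qdisc %.3fms, netdevice %.3fms" % (
        (hunk['xmit_t'] - hunk['queue_t']) / 1000000.0,
        (hunk['free_t'] - hunk['xmit_t']) / 1000000.0))

# demo_tx_latency() prints: Qdisc 0.750ms, netdevice 2.500ms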
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/netdev-times.py
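# ---------------------------------------------------------------------------
# Illustrative usage note for the netdev-times.py entry above (not part of
# the perf sources): the script is driven by perf's scripting engine and
# understands the options listed in its header comment, e.g.
#
#   perf script record netdev-times
#   perf script report netdev-times tx dev=eth0
#
# (the option strings 'tx', 'rx', 'dev=' and 'debug' are parsed in
# trace_begin() above).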
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

from __future__ import print_function

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print("Press control+C to stop and show the summary")

def trace_end():
    print_error_totals()

def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, ret):
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid):
        return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1

def syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    raw_syscalls__sys_exit(**locals())

def print_error_totals():
    if for_comm is not None:
        print("\nsyscall errors for %s:\n" % (for_comm))
    else:
        print("\nsyscall errors:\n")

    print("%-30s %10s" % ("comm [pid]", "count"))
    print("%-30s %10s" % ("------------------------------", "----------"))

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print("\n%s [%d]" % (comm, pid))
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print(" syscall: %-16s" % syscall_name(id))
                ret_keys = syscalls[comm][pid][id].keys()
                for ret, val in sorted(syscalls[comm][pid][id].items(),
                        key = lambda kv: (kv[1], kv[0]), reverse = True):
                    print(" err = %-20s %10d" % (strerror(ret), val))
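# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): autodict comes from
# perf's Util module and behaves like an arbitrarily nested dictionary, which
# is why the counter update above needs the try/except TypeError fallback.
# Equivalent bookkeeping with only the standard library:

from collections import defaultdict

def nested_counter():
    # comm -> pid -> syscall id -> errno -> count
    return defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int))))

# counts = nested_counter()
# counts["bash"][1234][2][-2] += 1   # no fallback branch needed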
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/failed-syscalls-by-pid.py
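# ---------------------------------------------------------------------------
# Illustrative usage note for the failed-syscalls-by-pid.py entry above (not
# part of the perf sources): the script is run through perf's scripting
# engine, e.g.
#
#   perf record -e raw_syscalls:sys_exit -a sleep 1
#   perf script -s tools/perf/scripts/python/failed-syscalls-by-pid.py
#
# An optional [comm|pid] argument restricts the output, as handled by the
# sys.argv parsing above.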
# export-to-sqlite.py: export perf data to a sqlite3 database
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.

from __future__ import print_function

import os
import sys
import struct
import datetime

# To use this script you will need to have installed package python-pyside which
# provides LGPL-licensed Python bindings for Qt. You will also need the package
# libqt4-sql-sqlite for Qt sqlite3 support.
#
# Examples of installing pyside:
#
# ubuntu:
#
#	$ sudo apt-get install python-pyside.qtsql libqt4-sql-psql
#
#	Alternately, to use Python3 and/or pyside 2, one of the following:
#
#		$ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql
#		$ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql
#		$ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql
#
# fedora:
#
#	$ sudo yum install python-pyside
#
#	Alternately, to use Python3 and/or pyside 2, one of the following:
#		$ sudo yum install python3-pyside
#		$ pip install --user PySide2
#		$ pip3 install --user PySide2
#
# An example of using this script with Intel PT:
#
#	$ perf record -e intel_pt//u ls
#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
#	2017-07-31 14:26:07.326913 Creating database...
#	2017-07-31 14:26:07.538097 Writing records...
#	2017-07-31 14:26:09.889292 Adding indexes
#	2017-07-31 14:26:09.958746 Done
#
# To browse the database, sqlite3 can be used e.g.
#
#	$ sqlite3 pt_example
#	sqlite> .header on
#	sqlite> select * from samples_view where id < 10;
#	sqlite> .mode column
#	sqlite> select * from samples_view where id < 10;
#	sqlite> .tables
#	sqlite> .schema samples_view
#	sqlite> .quit
#
# An example of using the database is provided by the script
# exported-sql-viewer.py. Refer to that script for details.
#
# The database structure is practically the same as created by the script
# export-to-postgresql.py. Refer to that script for details. A notable
# difference is the 'transaction' column of the 'samples' table which is
# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
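# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original script): 'transaction' really
# is a reserved word in SQLite, which is why the column is renamed below.
# For example, using the standard-library sqlite3 module:
#
#   import sqlite3
#   con = sqlite3.connect(":memory:")
#   con.execute("CREATE TABLE t (transaction_ bigint)")  # OK
#   con.execute("CREATE TABLE u (transaction bigint)")   # raises
#                        # sqlite3.OperationalError: near "transaction"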
pyside_version_1 = True if not "pyside-version-1" in sys.argv: try: from PySide2.QtSql import * pyside_version_1 = False except: pass if pyside_version_1: from PySide.QtSql import * sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') # These perf imports are not used at present #from perf_trace_context import * #from Core import * perf_db_export_mode = True perf_db_export_calls = False perf_db_export_callchains = False def printerr(*args, **keyword_args): print(*args, file=sys.stderr, **keyword_args) def printdate(*args, **kw_args): print(datetime.datetime.today(), *args, sep=' ', **kw_args) def usage(): printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]"); printerr("where: columns 'all' or 'branches'"); printerr(" calls 'calls' => create calls and call_paths table"); printerr(" callchains 'callchains' => create call_paths table"); printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1"); raise Exception("Too few or bad arguments") if (len(sys.argv) < 2): usage() dbname = sys.argv[1] if (len(sys.argv) >= 3): columns = sys.argv[2] else: columns = "all" if columns not in ("all", "branches"): usage() branches = (columns == "branches") for i in range(3,len(sys.argv)): if (sys.argv[i] == "calls"): perf_db_export_calls = True elif (sys.argv[i] == "callchains"): perf_db_export_callchains = True elif (sys.argv[i] == "pyside-version-1"): pass else: usage() def do_query(q, s): if (q.exec_(s)): return raise Exception("Query failed: " + q.lastError().text()) def do_query_(q): if (q.exec_()): return raise Exception("Query failed: " + q.lastError().text()) printdate("Creating database ...") db_exists = False try: f = open(dbname) f.close() db_exists = True except: pass if db_exists: raise Exception(dbname + " already exists") db = QSqlDatabase.addDatabase('QSQLITE') db.setDatabaseName(dbname) db.open() query = QSqlQuery(db) do_query(query, 'PRAGMA journal_mode = OFF') do_query(query, 'BEGIN TRANSACTION') do_query(query, 'CREATE TABLE selected_events (' 'id integer NOT NULL PRIMARY KEY,' 'name varchar(80))') do_query(query, 'CREATE TABLE machines (' 'id integer NOT NULL PRIMARY KEY,' 'pid integer,' 'root_dir varchar(4096))') do_query(query, 'CREATE TABLE threads (' 'id integer NOT NULL PRIMARY KEY,' 'machine_id bigint,' 'process_id bigint,' 'pid integer,' 'tid integer)') do_query(query, 'CREATE TABLE comms (' 'id integer NOT NULL PRIMARY KEY,' 'comm varchar(16),' 'c_thread_id bigint,' 'c_time bigint,' 'exec_flag boolean)') do_query(query, 'CREATE TABLE comm_threads (' 'id integer NOT NULL PRIMARY KEY,' 'comm_id bigint,' 'thread_id bigint)') do_query(query, 'CREATE TABLE dsos (' 'id integer NOT NULL PRIMARY KEY,' 'machine_id bigint,' 'short_name varchar(256),' 'long_name varchar(4096),' 'build_id varchar(64))') do_query(query, 'CREATE TABLE symbols (' 'id integer NOT NULL PRIMARY KEY,' 'dso_id bigint,' 'sym_start bigint,' 'sym_end bigint,' 'binding integer,' 'name varchar(2048))') do_query(query, 'CREATE TABLE branch_types (' 'id integer NOT NULL PRIMARY KEY,' 'name varchar(80))') if branches: do_query(query, 'CREATE TABLE samples (' 'id integer NOT NULL PRIMARY KEY,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'branch_type integer,' 'in_tx boolean,' 'call_path_id 
bigint,' 'insn_count bigint,' 'cyc_count bigint,' 'flags integer)') else: do_query(query, 'CREATE TABLE samples (' 'id integer NOT NULL PRIMARY KEY,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'period bigint,' 'weight bigint,' 'transaction_ bigint,' 'data_src bigint,' 'branch_type integer,' 'in_tx boolean,' 'call_path_id bigint,' 'insn_count bigint,' 'cyc_count bigint,' 'flags integer)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE TABLE call_paths (' 'id integer NOT NULL PRIMARY KEY,' 'parent_id bigint,' 'symbol_id bigint,' 'ip bigint)') if perf_db_export_calls: do_query(query, 'CREATE TABLE calls (' 'id integer NOT NULL PRIMARY KEY,' 'thread_id bigint,' 'comm_id bigint,' 'call_path_id bigint,' 'call_time bigint,' 'return_time bigint,' 'branch_count bigint,' 'call_id bigint,' 'return_id bigint,' 'parent_call_path_id bigint,' 'flags integer,' 'parent_id bigint,' 'insn_count bigint,' 'cyc_count bigint)') do_query(query, 'CREATE TABLE ptwrite (' 'id integer NOT NULL PRIMARY KEY,' 'payload bigint,' 'exact_ip integer)') do_query(query, 'CREATE TABLE cbr (' 'id integer NOT NULL PRIMARY KEY,' 'cbr integer,' 'mhz integer,' 'percent integer)') do_query(query, 'CREATE TABLE mwait (' 'id integer NOT NULL PRIMARY KEY,' 'hints integer,' 'extensions integer)') do_query(query, 'CREATE TABLE pwre (' 'id integer NOT NULL PRIMARY KEY,' 'cstate integer,' 'subcstate integer,' 'hw integer)') do_query(query, 'CREATE TABLE exstop (' 'id integer NOT NULL PRIMARY KEY,' 'exact_ip integer)') do_query(query, 'CREATE TABLE pwrx (' 'id integer NOT NULL PRIMARY KEY,' 'deepest_cstate integer,' 'last_cstate integer,' 'wake_reason integer)') do_query(query, 'CREATE TABLE context_switches (' 'id integer NOT NULL PRIMARY KEY,' 'machine_id bigint,' 'time bigint,' 'cpu integer,' 'thread_out_id bigint,' 'comm_out_id bigint,' 'thread_in_id bigint,' 'comm_in_id bigint,' 'flags integer)') # printf was added to sqlite in version 3.8.3 sqlite_has_printf = False try: do_query(query, 'SELECT printf("") FROM machines') sqlite_has_printf = True except: pass def emit_to_hex(x): if sqlite_has_printf: return 'printf("%x", ' + x + ')' else: return x do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' 'id,' 'pid,' 'root_dir,' 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest' ' FROM machines') do_query(query, 'CREATE VIEW dsos_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'short_name,' 'long_name,' 'build_id' ' FROM dsos') do_query(query, 'CREATE VIEW symbols_view AS ' 'SELECT ' 'id,' 'name,' '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,' 'dso_id,' 'sym_start,' 'sym_end,' 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding' ' FROM symbols') do_query(query, 'CREATE VIEW threads_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'process_id,' 'pid,' 'tid' ' FROM threads') do_query(query, 'CREATE VIEW comm_threads_view AS ' 'SELECT ' 'comm_id,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid' ' FROM comm_threads') 
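# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): printf() was added
# to sqlite in version 3.8.3, so the script probes for it above by simply
# trying it; the available SQL functions depend on the library version, not
# on the database file. The same probe with the standard-library sqlite3
# module:
#
#   import sqlite3
#   con = sqlite3.connect(":memory:")
#   try:
#       con.execute('SELECT printf("%x", 255)').fetchone()  # ('ff',)
#       sqlite_has_printf = True
#   except sqlite3.OperationalError:
#       sqlite_has_printf = False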
if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE VIEW call_paths_view AS ' 'SELECT ' 'c.id,' + emit_to_hex('c.ip') + ' AS ip,' 'c.symbol_id,' '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,' '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,' '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,' 'c.parent_id,' + emit_to_hex('p.ip') + ' AS parent_ip,' 'p.symbol_id AS parent_symbol_id,' '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,' '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,' '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') if perf_db_export_calls: do_query(query, 'CREATE VIEW calls_view AS ' 'SELECT ' 'calls.id,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'call_path_id,' + emit_to_hex('ip') + ' AS ip,' 'symbol_id,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'call_time,' 'return_time,' 'return_time - call_time AS elapsed_time,' 'branch_count,' 'insn_count,' 'cyc_count,' 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,' 'call_id,' 'return_id,' 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' 'parent_call_path_id,' 'calls.parent_id' ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') do_query(query, 'CREATE VIEW samples_view AS ' 'SELECT ' 'id,' 'time,' 'cpu,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,' + emit_to_hex('ip') + ' AS ip_hex,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'sym_offset,' '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,' + emit_to_hex('to_ip') + ' AS to_ip_hex,' '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,' 'to_sym_offset,' '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' 'in_tx,' 'insn_count,' 'cyc_count,' 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,' 'flags' ' FROM samples') do_query(query, 'CREATE VIEW ptwrite_view AS ' 'SELECT ' 'ptwrite.id,' 'time,' 'cpu,' + emit_to_hex('payload') + ' AS payload_hex,' 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip' ' FROM ptwrite' ' INNER JOIN samples ON samples.id = ptwrite.id') do_query(query, 'CREATE VIEW cbr_view AS ' 'SELECT ' 'cbr.id,' 'time,' 'cpu,' 'cbr,' 'mhz,' 'percent' ' FROM cbr' ' INNER JOIN samples ON samples.id = cbr.id') do_query(query, 'CREATE VIEW mwait_view AS ' 'SELECT ' 'mwait.id,' 'time,' 'cpu,' + emit_to_hex('hints') + ' AS hints_hex,' + emit_to_hex('extensions') + ' AS extensions_hex' ' FROM mwait' ' INNER JOIN samples ON samples.id = mwait.id') do_query(query, 'CREATE VIEW pwre_view AS ' 'SELECT ' 'pwre.id,' 'time,' 'cpu,' 'cstate,' 'subcstate,' 'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw' ' FROM pwre' ' INNER JOIN samples ON samples.id = pwre.id') do_query(query, 'CREATE VIEW 
exstop_view AS ' 'SELECT ' 'exstop.id,' 'time,' 'cpu,' 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip' ' FROM exstop' ' INNER JOIN samples ON samples.id = exstop.id') do_query(query, 'CREATE VIEW pwrx_view AS ' 'SELECT ' 'pwrx.id,' 'time,' 'cpu,' 'deepest_cstate,' 'last_cstate,' 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' ' WHEN wake_reason=2 THEN \'Timer Deadline\'' ' WHEN wake_reason=4 THEN \'Monitored Address\'' ' WHEN wake_reason=8 THEN \'HW\'' ' ELSE wake_reason ' 'END AS wake_reason' ' FROM pwrx' ' INNER JOIN samples ON samples.id = pwrx.id') do_query(query, 'CREATE VIEW power_events_view AS ' 'SELECT ' 'samples.id,' 'time,' 'cpu,' 'selected_events.name AS event,' 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,' 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,' 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,' 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,' 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,' 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,' 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,' 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,' 'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,' 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,' 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,' 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT ' 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' ' WHEN wake_reason=2 THEN \'Timer Deadline\'' ' WHEN wake_reason=4 THEN \'Monitored Address\'' ' WHEN wake_reason=8 THEN \'HW\'' ' ELSE wake_reason ' 'END' ' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason' ' FROM samples' ' INNER JOIN selected_events ON selected_events.id = evsel_id' ' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')') do_query(query, 'CREATE VIEW context_switches_view AS ' 'SELECT ' 'context_switches.id,' 'context_switches.machine_id,' 'context_switches.time,' 'context_switches.cpu,' 'th_out.pid AS pid_out,' 'th_out.tid AS tid_out,' 'comm_out.comm AS comm_out,' 'th_in.pid AS pid_in,' 'th_in.tid AS tid_in,' 'comm_in.comm AS comm_in,' 'CASE WHEN context_switches.flags = 0 THEN \'in\'' ' WHEN context_switches.flags = 1 THEN \'out\'' ' WHEN context_switches.flags = 3 THEN \'out preempt\'' ' ELSE context_switches.flags ' 'END AS flags' ' FROM context_switches' ' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id' ' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id' ' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id' ' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id') do_query(query, 'END TRANSACTION') evsel_query = QSqlQuery(db) 
evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)") machine_query = QSqlQuery(db) machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)") thread_query = QSqlQuery(db) thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)") comm_query = QSqlQuery(db) comm_query.prepare("INSERT INTO comms VALUES (?, ?, ?, ?, ?)") comm_thread_query = QSqlQuery(db) comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)") dso_query = QSqlQuery(db) dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)") symbol_query = QSqlQuery(db) symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)") branch_type_query = QSqlQuery(db) branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)") sample_query = QSqlQuery(db) if branches: sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") else: sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") if perf_db_export_calls or perf_db_export_callchains: call_path_query = QSqlQuery(db) call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)") if perf_db_export_calls: call_query = QSqlQuery(db) call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") ptwrite_query = QSqlQuery(db) ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)") cbr_query = QSqlQuery(db) cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)") mwait_query = QSqlQuery(db) mwait_query.prepare("INSERT INTO mwait VALUES (?, ?, ?)") pwre_query = QSqlQuery(db) pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)") exstop_query = QSqlQuery(db) exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)") pwrx_query = QSqlQuery(db) pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)") context_switch_query = QSqlQuery(db) context_switch_query.prepare("INSERT INTO context_switches VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)") def trace_begin(): printdate("Writing records...") do_query(query, 'BEGIN TRANSACTION') # id == 0 means unknown. 
It is easier to create records for them than replace the zeroes with NULLs evsel_table(0, "unknown") machine_table(0, 0, "unknown") thread_table(0, 0, 0, -1, -1) comm_table(0, "unknown", 0, 0, 0) dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) if perf_db_export_calls or perf_db_export_callchains: call_path_table(0, 0, 0, 0) call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) unhandled_count = 0 def is_table_empty(table_name): do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1'); if query.next(): return False return True def drop(table_name): do_query(query, 'DROP VIEW ' + table_name + '_view'); do_query(query, 'DROP TABLE ' + table_name); def trace_end(): do_query(query, 'END TRANSACTION') printdate("Adding indexes") if perf_db_export_calls: do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') do_query(query, 'ALTER TABLE comms ADD has_calls boolean') do_query(query, 'UPDATE comms SET has_calls = 1 WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)') printdate("Dropping unused tables") if is_table_empty("ptwrite"): drop("ptwrite") if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): do_query(query, 'DROP VIEW power_events_view'); drop("mwait") drop("pwre") drop("exstop") drop("pwrx") if is_table_empty("cbr"): drop("cbr") if is_table_empty("context_switches"): drop("context_switches") if (unhandled_count): printdate("Warning: ", unhandled_count, " unhandled events") printdate("Done") def trace_unhandled(event_name, context, event_fields_dict): global unhandled_count unhandled_count += 1 def sched__sched_switch(*x): pass def bind_exec(q, n, x): for xx in x[0:n]: q.addBindValue(str(xx)) do_query_(q) def evsel_table(*x): bind_exec(evsel_query, 2, x) def machine_table(*x): bind_exec(machine_query, 3, x) def thread_table(*x): bind_exec(thread_query, 5, x) def comm_table(*x): bind_exec(comm_query, 5, x) def comm_thread_table(*x): bind_exec(comm_thread_query, 3, x) def dso_table(*x): bind_exec(dso_query, 5, x) def symbol_table(*x): bind_exec(symbol_query, 6, x) def branch_type_table(*x): bind_exec(branch_type_query, 2, x) def sample_table(*x): if branches: for xx in x[0:15]: sample_query.addBindValue(str(xx)) for xx in x[19:25]: sample_query.addBindValue(str(xx)) do_query_(sample_query) else: bind_exec(sample_query, 25, x) def call_path_table(*x): bind_exec(call_path_query, 4, x) def call_return_table(*x): bind_exec(call_query, 14, x) def ptwrite(id, raw_buf): data = struct.unpack_from("<IQ", raw_buf) flags = data[0] payload = data[1] exact_ip = flags & 1 ptwrite_query.addBindValue(str(id)) ptwrite_query.addBindValue(str(payload)) ptwrite_query.addBindValue(str(exact_ip)) do_query_(ptwrite_query) def cbr(id, raw_buf): data = struct.unpack_from("<BBBBII", raw_buf) cbr = data[0] MHz = (data[4] + 500) / 1000 percent = ((cbr * 1000 / data[2]) + 5) / 10 cbr_query.addBindValue(str(id)) cbr_query.addBindValue(str(cbr)) cbr_query.addBindValue(str(MHz)) cbr_query.addBindValue(str(percent)) do_query_(cbr_query) def mwait(id, raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] hints = payload & 0xff extensions = (payload >> 32) & 0x3 mwait_query.addBindValue(str(id)) mwait_query.addBindValue(str(hints)) mwait_query.addBindValue(str(extensions)) do_query_(mwait_query) def pwre(id, raw_buf): data = 
struct.unpack_from("<IQ", raw_buf) payload = data[1] hw = (payload >> 7) & 1 cstate = (payload >> 12) & 0xf subcstate = (payload >> 8) & 0xf pwre_query.addBindValue(str(id)) pwre_query.addBindValue(str(cstate)) pwre_query.addBindValue(str(subcstate)) pwre_query.addBindValue(str(hw)) do_query_(pwre_query) def exstop(id, raw_buf): data = struct.unpack_from("<I", raw_buf) flags = data[0] exact_ip = flags & 1 exstop_query.addBindValue(str(id)) exstop_query.addBindValue(str(exact_ip)) do_query_(exstop_query) def pwrx(id, raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] deepest_cstate = payload & 0xf last_cstate = (payload >> 4) & 0xf wake_reason = (payload >> 8) & 0xf pwrx_query.addBindValue(str(id)) pwrx_query.addBindValue(str(deepest_cstate)) pwrx_query.addBindValue(str(last_cstate)) pwrx_query.addBindValue(str(wake_reason)) do_query_(pwrx_query) def synth_data(id, config, raw_buf, *x): if config == 0: ptwrite(id, raw_buf) elif config == 1: mwait(id, raw_buf) elif config == 2: pwre(id, raw_buf) elif config == 3: exstop(id, raw_buf) elif config == 4: pwrx(id, raw_buf) elif config == 5: cbr(id, raw_buf) def context_switch_table(*x): bind_exec(context_switch_query, 9, x)
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/export-to-sqlite.py
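The exporter above produces an ordinary SQLite file, so its output can be inspected without perf. A minimal sketch using only the Python standard library; the file name pt_example.db is a stand-in for whatever name was passed on export, and calls_view exists only when calls were exported:

import sqlite3

conn = sqlite3.connect("pt_example.db")  # hypothetical export name
conn.row_factory = sqlite3.Row
# Ten slowest calls; calls_view is created only when calls are exported.
rows = conn.execute(
    "SELECT command, symbol, elapsed_time, branch_count"
    " FROM calls_view ORDER BY elapsed_time DESC LIMIT 10")
for row in rows:
    print(row["command"], row["symbol"], row["elapsed_time"], row["branch_count"])
conn.close()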
# Cpu task migration overview toy # # Copyright (C) 2010 Frederic Weisbecker <[email protected]> # # perf script event handlers have been generated by perf script -g python # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. from __future__ import print_function import os import sys from collections import defaultdict try: from UserList import UserList except ImportError: # Python 3: UserList moved to the collections package from collections import UserList sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from SchedGui import * threads = { 0 : "idle"} def thread_name(pid): return "%s:%d" % (threads[pid], pid) class RunqueueEventUnknown: @staticmethod def color(): return None def __repr__(self): return "unknown" class RunqueueEventSleep: @staticmethod def color(): return (0, 0, 0xff) def __init__(self, sleeper): self.sleeper = sleeper def __repr__(self): return "%s gone to sleep" % thread_name(self.sleeper) class RunqueueEventWakeup: @staticmethod def color(): return (0xff, 0xff, 0) def __init__(self, wakee): self.wakee = wakee def __repr__(self): return "%s woke up" % thread_name(self.wakee) class RunqueueEventFork: @staticmethod def color(): return (0, 0xff, 0) def __init__(self, child): self.child = child def __repr__(self): return "new forked task %s" % thread_name(self.child) class RunqueueMigrateIn: @staticmethod def color(): return (0, 0xf0, 0xff) def __init__(self, new): self.new = new def __repr__(self): return "task migrated in %s" % thread_name(self.new) class RunqueueMigrateOut: @staticmethod def color(): return (0xff, 0, 0xff) def __init__(self, old): self.old = old def __repr__(self): return "task migrated out %s" % thread_name(self.old) class RunqueueSnapshot: def __init__(self, tasks = [0], event = RunqueueEventUnknown()): self.tasks = tuple(tasks) self.event = event def sched_switch(self, prev, prev_state, next): event = RunqueueEventUnknown() if taskState(prev_state) == "R" and next in self.tasks \ and prev in self.tasks: return self if taskState(prev_state) != "R": event = RunqueueEventSleep(prev) next_tasks = list(self.tasks[:]) if prev in self.tasks: if taskState(prev_state) != "R": next_tasks.remove(prev) elif taskState(prev_state) == "R": next_tasks.append(prev) if next not in next_tasks: next_tasks.append(next) return RunqueueSnapshot(next_tasks, event) def migrate_out(self, old): if old not in self.tasks: return self next_tasks = [task for task in self.tasks if task != old] return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old)) def __migrate_in(self, new, event): if new in self.tasks: self.event = event return self next_tasks = self.tasks[:] + tuple([new]) return RunqueueSnapshot(next_tasks, event) def migrate_in(self, new): return self.__migrate_in(new, RunqueueMigrateIn(new)) def wake_up(self, new): return self.__migrate_in(new, RunqueueEventWakeup(new)) def wake_up_new(self, new): return self.__migrate_in(new, RunqueueEventFork(new)) def load(self): """ Provide the number of tasks on the runqueue. 
        Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        ret += self.origin_tostring()
        return ret

class TimeSlice:
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)
        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)
        ts_list.append(self)
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)
        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        self.end = t
        return TimeSlice(t, self)

class TimeSliceList(UserList):
    def __init__(self, arg = []):
        self.data = arg

    def get_time_slice(self, ts):
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False
            # floor division: "i" indexes self.data, so it must stay an
            # integer under Python 3
            i = (end + start) // 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue
            if self.data[i].end < ts:
                start = i
            elif self.data[i].start > ts:
                end = i
        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        idx = self.find_time_slice(t)
        if idx == -1:
            return
        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)
        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        rq = slice.rqs[cpu]
        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0
        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)
        top_color = None
        if cpu in slice.event_cpus:
            top_color = rq.event.color()
        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        i = self.find_time_slice(start)
        if i == -1:
            return
        for i in range(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return
            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        if len(self.data) == 0:
            return (0, 0)
        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu

class SchedEventProxy:
    def __init__(self):
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio,
                     prev_state, next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # the format arguments must be a tuple, otherwise "%" binds to
            # the first value only and print() receives six stray arguments
            print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" %
                  (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid))
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)

def trace_begin():
    global parser
    parser = SchedEventProxy()

def trace_end():
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()

def sched__sched_stat_runtime(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, prio):
    pass

def sched__sched_migrate_task(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, comm, pid, prio, orig_cpu, dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm, common_callchain, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm,
common_callchain, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.wake_up(headers, comm, pid, success, target_cpu, 1) def sched__sched_wakeup(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.wake_up(headers, comm, pid, success, target_cpu, 0) def sched__sched_wait_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_kthread_stop_ret(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, ret): pass def sched__sched_kthread_stop(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid): pass def trace_unhandled(event_name, context, event_fields_dict): pass
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/sched-migration.py
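A small sketch of how the RunqueueSnapshot class above is meant to evolve, assuming the script's definitions are in scope; the pids and names are invented. Each scheduler event returns a new snapshot rather than mutating the old one, and load() excludes the idle task (pid 0):

threads[101] = "worker-a"   # thread_name() needs comm entries
threads[102] = "worker-b"

rq = RunqueueSnapshot()     # runqueue holding only the idle task
rq = rq.wake_up(101)        # records a RunqueueEventWakeup
rq = rq.wake_up(102)
print(rq.tasks, rq.load())  # -> (0, 101, 102) 2
rq = rq.migrate_out(101)
print(rq.event)             # -> task migrated out worker-a:101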
# system call counts, by pid # (c) 2010, Tom Zanussi <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. from __future__ import print_function import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts-by-pid.py [comm]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print("Press control+C to stop and show the summary") def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, id, args): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return try: syscalls[common_comm][common_pid][id] += 1 except TypeError: syscalls[common_comm][common_pid][id] = 1 def syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): raw_syscalls__sys_enter(**locals()) def print_syscall_totals(): if for_comm is not None: print("\nsyscall events for %s:\n" % (for_comm)) else: print("\nsyscall events by comm/pid:\n") print("%-40s %10s" % ("comm [pid]/syscalls", "count")) print("%-40s %10s" % ("----------------------------------------", "----------")) comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print("\n%s [%d]" % (comm, pid)) id_keys = syscalls[comm][pid].keys() for id, val in sorted(syscalls[comm][pid].items(), key = lambda kv: (kv[1], kv[0]), reverse = True): print(" %-38s %10d" % (syscall_name(id), val))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/syscall-counts-by-pid.py
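The counting idiom above leans on Core.autodict, a recursive defaultdict: the first increment of a leaf raises TypeError (empty dict += 1) and the handler seeds it with 1. A self-contained equivalent with made-up events:

from collections import defaultdict

def autodict():
    return defaultdict(autodict)

syscall_counts = autodict()
for comm, pid, syscall_id in [("bash", 42, 0), ("bash", 42, 0), ("vi", 7, 1)]:
    try:
        syscall_counts[comm][pid][syscall_id] += 1
    except TypeError:
        # first hit: the leaf is still an empty autodict
        syscall_counts[comm][pid][syscall_id] = 1

print(syscall_counts["bash"][42][0])  # -> 2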
# flamegraph.py - create flame graphs from perf samples # SPDX-License-Identifier: GPL-2.0 # # Usage: # # perf record -a -g -F 99 sleep 60 # perf script report flamegraph # # Combined: # # perf script flamegraph -a -F 99 sleep 60 # # Written by Andreas Gerstmayr <[email protected]> # Flame Graphs invented by Brendan Gregg <[email protected]> # Works in tandem with d3-flame-graph by Martin Spier <[email protected]> # # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring from __future__ import print_function import sys import os import io import argparse import json import subprocess # pylint: disable=too-few-public-methods class Node: def __init__(self, name, libtype): self.name = name # "root" | "kernel" | "" # "" indicates user space self.libtype = libtype self.value = 0 self.children = [] def to_json(self): return { "n": self.name, "l": self.libtype, "v": self.value, "c": self.children } class FlameGraphCLI: def __init__(self, args): self.args = args self.stack = Node("all", "root") if self.args.format == "html" and \ not os.path.isfile(self.args.template): print("Flame Graph template {} does not exist. Please install " "the js-d3-flame-graph (RPM) or libjs-d3-flame-graph (deb) " "package, specify an existing flame graph template " "(--template PATH) or another output format " "(--format FORMAT).".format(self.args.template), file=sys.stderr) sys.exit(1) @staticmethod def get_libtype_from_dso(dso): """ when kernel-debuginfo is installed, dso points to /usr/lib/debug/lib/modules/*/vmlinux """ if dso and (dso == "[kernel.kallsyms]" or dso.endswith("/vmlinux")): return "kernel" return "" @staticmethod def find_or_create_node(node, name, libtype): for child in node.children: if child.name == name: return child child = Node(name, libtype) node.children.append(child) return child def process_event(self, event): pid = event.get("sample", {}).get("pid", 0) # event["dso"] sometimes contains /usr/lib/debug/lib/modules/*/vmlinux # for user-space processes; let's use pid for kernel or user-space distinction if pid == 0: comm = event["comm"] libtype = "kernel" else: comm = "{} ({})".format(event["comm"], pid) libtype = "" node = self.find_or_create_node(self.stack, comm, libtype) if "callchain" in event: for entry in reversed(event["callchain"]): name = entry.get("sym", {}).get("name", "[unknown]") libtype = self.get_libtype_from_dso(entry.get("dso")) node = self.find_or_create_node(node, name, libtype) else: name = event.get("symbol", "[unknown]") libtype = self.get_libtype_from_dso(event.get("dso")) node = self.find_or_create_node(node, name, libtype) node.value += 1 def get_report_header(self): if self.args.input == "-": # when this script is invoked with "perf script flamegraph", # no perf.data is created and we cannot read the header of it return "" try: output = subprocess.check_output(["perf", "report", "--header-only"]) return output.decode("utf-8") except Exception as err: # pylint: disable=broad-except print("Error reading report header: {}".format(err), file=sys.stderr) return "" def trace_end(self): stacks_json = json.dumps(self.stack, default=lambda x: x.to_json()) if self.args.format == "html": report_header = self.get_report_header() options = { "colorscheme": self.args.colorscheme, "context": report_header } options_json = json.dumps(options) try: with io.open(self.args.template, encoding="utf-8") as template: output_str = ( template.read() .replace("/** @options_json **/", options_json) .replace("/** 
@flamegraph_json **/", stacks_json) ) except IOError as err: print("Error reading template file: {}".format(err), file=sys.stderr) sys.exit(1) output_fn = self.args.output or "flamegraph.html" else: output_str = stacks_json output_fn = self.args.output or "stacks.json" if output_fn == "-": with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out: out.write(output_str) else: print("dumping data to {}".format(output_fn)) try: with io.open(output_fn, "w", encoding="utf-8") as out: out.write(output_str) except IOError as err: print("Error writing output file: {}".format(err), file=sys.stderr) sys.exit(1) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Create flame graphs.") parser.add_argument("-f", "--format", default="html", choices=["json", "html"], help="output file format") parser.add_argument("-o", "--output", help="output file name") parser.add_argument("--template", default="/usr/share/d3-flame-graph/d3-flamegraph-base.html", help="path to flame graph HTML template") parser.add_argument("--colorscheme", default="blue-green", help="flame graph color scheme", choices=["blue-green", "orange"]) parser.add_argument("-i", "--input", help=argparse.SUPPRESS) cli_args = parser.parse_args() cli = FlameGraphCLI(cli_args) process_event = cli.process_event trace_end = cli.trace_end
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/flamegraph.py
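The heart of the script above is a trie of Node objects keyed by frame name. A toy reimplementation of that insertion (frame names invented), showing the d3-flame-graph JSON it serializes to:

import json

class Node:
    def __init__(self, name, libtype):
        self.name, self.libtype = name, libtype
        self.value, self.children = 0, []
    def to_json(self):
        return {"n": self.name, "l": self.libtype, "v": self.value, "c": self.children}

def insert(node, frames):
    # walk root -> leaf, creating children as needed, then count the sample
    for name in frames:
        child = next((c for c in node.children if c.name == name), None)
        if child is None:
            child = Node(name, "")
            node.children.append(child)
        node = child
    node.value += 1

root = Node("all", "root")
insert(root, ["bash (42)", "main", "read"])
insert(root, ["bash (42)", "main", "write"])
print(json.dumps(root, default=lambda x: x.to_json()))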
# SPDX-License-Identifier: GPL-2.0 from __future__ import print_function data = {} times = [] threads = [] cpus = [] def get_key(time, event, cpu, thread): return "%d-%s-%d-%d" % (time, event, cpu, thread) def store_key(time, cpu, thread): if (time not in times): times.append(time) if (cpu not in cpus): cpus.append(cpu) if (thread not in threads): threads.append(thread) def store(time, event, cpu, thread, val, ena, run): #print("event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % # (event, cpu, thread, time, val, ena, run)) store_key(time, cpu, thread) key = get_key(time, event, cpu, thread) data[key] = [ val, ena, run] def get(time, event, cpu, thread): key = get_key(time, event, cpu, thread) return data[key][0] def stat__cycles_k(cpu, thread, time, val, ena, run): store(time, "cycles", cpu, thread, val, ena, run); def stat__instructions_k(cpu, thread, time, val, ena, run): store(time, "instructions", cpu, thread, val, ena, run); def stat__cycles_u(cpu, thread, time, val, ena, run): store(time, "cycles", cpu, thread, val, ena, run); def stat__instructions_u(cpu, thread, time, val, ena, run): store(time, "instructions", cpu, thread, val, ena, run); def stat__cycles(cpu, thread, time, val, ena, run): store(time, "cycles", cpu, thread, val, ena, run); def stat__instructions(cpu, thread, time, val, ena, run): store(time, "instructions", cpu, thread, val, ena, run); def stat__interval(time): for cpu in cpus: for thread in threads: cyc = get(time, "cycles", cpu, thread) ins = get(time, "instructions", cpu, thread) cpi = 0 if ins != 0: cpi = cyc/float(ins) print("%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)) def trace_end(): pass # XXX trace_end callback could be used as an alternative place # to compute same values as in the script above: # # for time in times: # for cpu in cpus: # for thread in threads: # cyc = get(time, "cycles", cpu, thread) # ins = get(time, "instructions", cpu, thread) # # if ins != 0: # cpi = cyc/float(ins) # # print("time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/stat-cpi.py
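With the definitions above in scope (no perf session needed), the interval arithmetic can be sanity-checked by driving the callbacks with made-up counter values:

store(1000000000, "cycles",       0, 0, 3000, 1, 1)
store(1000000000, "instructions", 0, 0, 1500, 1, 1)
stat__interval(1000000000)
# prints:        1.000000: cpu 0, thread 0 -> cpi 2.000000 (3000/1500)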
# perf script event handlers, generated by perf script -g python # (c) 2010, Tom Zanussi <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # This script tests basic functionality such as flag and symbol # strings, common_xxx() calls back into perf, begin, end, unhandled # events, etc. Basically, if this script runs successfully and # displays expected results, Python scripting support should be ok. from __future__ import print_function import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Core import * from perf_trace_context import * unhandled = autodict() def trace_begin(): print("trace_begin") pass def trace_end(): print_unhandled() def irq__softirq_entry(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, vec): print_header(event_name, common_cpu, common_secs, common_nsecs, common_pid, common_comm) print_uncommon(context) print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec))) def kmem__kmalloc(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, call_site, ptr, bytes_req, bytes_alloc, gfp_flags): print_header(event_name, common_cpu, common_secs, common_nsecs, common_pid, common_comm) print_uncommon(context) print("call_site=%u, ptr=%u, bytes_req=%u, " "bytes_alloc=%u, gfp_flags=%s" % (call_site, ptr, bytes_req, bytes_alloc, flag_str("kmem__kmalloc", "gfp_flags", gfp_flags))) def trace_unhandled(event_name, context, event_fields_dict): try: unhandled[event_name] += 1 except TypeError: unhandled[event_name] = 1 def print_header(event_name, cpu, secs, nsecs, pid, comm): print("%-20s %5u %05u.%09u %8u %-20s " % (event_name, cpu, secs, nsecs, pid, comm), end=' ') # print trace fields not included in handler args def print_uncommon(context): print("common_preempt_count=%d, common_flags=%s, " "common_lock_depth=%d, " % (common_pc(context), trace_flag_str(common_flags(context)), common_lock_depth(context))) def print_unhandled(): keys = unhandled.keys() if not keys: return print("\nunhandled events:\n") print("%-40s %10s" % ("event", "count")) print("%-40s %10s" % ("----------------------------------------", "-----------")) for event_name in keys: print("%-40s %10d\n" % (event_name, unhandled[event_name]))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/check-perf-trace.py
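flag_str() and symbol_str() only work because perf registers flag and symbol tables at startup through the Core helpers (listed further below). A standalone sketch of such a registration, using illustrative flag values rather than the real GFP bits:

from Core import define_flag_field, define_flag_value, flag_str

define_flag_field("kmem__kmalloc", "gfp_flags", "|")
define_flag_value("kmem__kmalloc", "gfp_flags", 0x10, "GFP_NOIO")    # illustrative value
define_flag_value("kmem__kmalloc", "gfp_flags", 0x20, "GFP_ATOMIC")  # illustrative value

print(flag_str("kmem__kmalloc", "gfp_flags", 0x30))  # -> GFP_NOIO | GFP_ATOMIC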
# report time spent in compaction # Licensed under the terms of the GNU GPL License version 2 # testing: # 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones import os import sys import re import signal signal.signal(signal.SIGPIPE, signal.SIG_DFL) usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n" class popt: DISP_DFL = 0 DISP_PROC = 1 DISP_PROC_VERBOSE=2 class topt: DISP_TIME = 0 DISP_MIG = 1 DISP_ISOLFREE = 2 DISP_ISOLMIG = 4 DISP_ALL = 7 class comm_filter: def __init__(self, re): self.re = re def filter(self, pid, comm): m = self.re.search(comm) return m == None or m.group() == "" class pid_filter: def __init__(self, low, high): self.low = (0 if low == "" else int(low)) self.high = (0 if high == "" else int(high)) def filter(self, pid, comm): return not (pid >= self.low and (self.high == 0 or pid <= self.high)) def set_type(t): global opt_disp opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t) def ns(sec, nsec): return (sec * 1000000000) + nsec def time(ns): return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000) class pair: def __init__(self, aval, bval, alabel = None, blabel = None): self.alabel = alabel self.blabel = blabel self.aval = aval self.bval = bval def __add__(self, rhs): self.aval += rhs.aval self.bval += rhs.bval return self def __str__(self): return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval) class cnode: def __init__(self, ns): self.ns = ns self.migrated = pair(0, 0, "moved", "failed") self.fscan = pair(0,0, "scanned", "isolated") self.mscan = pair(0,0, "scanned", "isolated") def __add__(self, rhs): self.ns += rhs.ns self.migrated += rhs.migrated self.fscan += rhs.fscan self.mscan += rhs.mscan return self def __str__(self): prev = 0 s = "%s " % time(self.ns) if (opt_disp & topt.DISP_MIG): s += "migration: %s" % self.migrated prev = 1 if (opt_disp & topt.DISP_ISOLFREE): s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan) prev = 1 if (opt_disp & topt.DISP_ISOLMIG): s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan) return s def complete(self, secs, nsecs): self.ns = ns(secs, nsecs) - self.ns def increment(self, migrated, fscan, mscan): if (migrated != None): self.migrated += migrated if (fscan != None): self.fscan += fscan if (mscan != None): self.mscan += mscan class chead: heads = {} val = cnode(0); fobj = None @classmethod def add_filter(cls, filter): cls.fobj = filter @classmethod def create_pending(cls, pid, comm, start_secs, start_nsecs): filtered = 0 try: head = cls.heads[pid] filtered = head.is_filtered() except KeyError: if cls.fobj != None: filtered = cls.fobj.filter(pid, comm) head = cls.heads[pid] = chead(comm, pid, filtered) if not filtered: head.mark_pending(start_secs, start_nsecs) @classmethod def increment_pending(cls, pid, migrated, fscan, mscan): head = cls.heads[pid] if not head.is_filtered(): if head.is_pending(): head.do_increment(migrated, fscan, mscan) else: sys.stderr.write("missing start compaction event for pid %d\n" % pid) @classmethod def complete_pending(cls, pid, secs, nsecs): head = cls.heads[pid] if not head.is_filtered(): if head.is_pending(): head.make_complete(secs, nsecs) else: sys.stderr.write("missing start compaction event for pid %d\n" % pid) @classmethod def gen(cls): if opt_proc != popt.DISP_DFL: for i in cls.heads: yield cls.heads[i] @classmethod def str(cls): return cls.val def __init__(self, comm, pid, filtered): self.comm = comm self.pid = pid self.val = 
cnode(0) self.pending = None self.filtered = filtered self.list = [] def __add__(self, rhs): self.ns += rhs.ns self.val += rhs.val return self def mark_pending(self, secs, nsecs): self.pending = cnode(ns(secs, nsecs)) def do_increment(self, migrated, fscan, mscan): self.pending.increment(migrated, fscan, mscan) def make_complete(self, secs, nsecs): self.pending.complete(secs, nsecs) chead.val += self.pending if opt_proc != popt.DISP_DFL: self.val += self.pending if opt_proc == popt.DISP_PROC_VERBOSE: self.list.append(self.pending) self.pending = None def enumerate(self): if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered(): for i, pelem in enumerate(self.list): sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem)) def is_pending(self): return self.pending != None def is_filtered(self): return self.filtered def display(self): if not self.is_filtered(): sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val)) def trace_end(): sys.stdout.write("total: %s\n" % chead.str()) for i in chead.gen(): i.display(), i.enumerate() def compaction__mm_compaction_migratepages(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, nr_migrated, nr_failed): chead.increment_pending(common_pid, pair(nr_migrated, nr_failed), None, None) def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): chead.increment_pending(common_pid, None, pair(nr_scanned, nr_taken), None) def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): chead.increment_pending(common_pid, None, None, pair(nr_scanned, nr_taken)) def compaction__mm_compaction_end(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, zone_start, migrate_start, free_start, zone_end, sync, status): chead.complete_pending(common_pid, common_secs, common_nsecs) def compaction__mm_compaction_begin(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, zone_start, migrate_start, free_start, zone_end, sync): chead.create_pending(common_pid, common_comm, common_secs, common_nsecs) def pr_help(): global usage sys.stdout.write(usage) sys.stdout.write("\n") sys.stdout.write("-h display this help\n") sys.stdout.write("-p display by process\n") sys.stdout.write("-pv display by process (verbose)\n") sys.stdout.write("-t display stall times only\n") sys.stdout.write("-m display stats for migration\n") sys.stdout.write("-fs display stats for free scanner\n") sys.stdout.write("-ms display stats for migration scanner\n") sys.stdout.write("-u display results in microseconds (default nanoseconds)\n") comm_re = None pid_re = None pid_regex = "^(\d*)-(\d*)$|^(\d*)$" opt_proc = popt.DISP_DFL opt_disp = topt.DISP_ALL opt_ns = True argc = len(sys.argv) - 1 if argc >= 1: pid_re = re.compile(pid_regex) for i, opt in enumerate(sys.argv[1:]): if opt[0] == "-": if opt == "-h": pr_help() exit(0); elif opt == "-p": opt_proc = popt.DISP_PROC elif opt == "-pv": opt_proc = popt.DISP_PROC_VERBOSE elif opt == '-u': opt_ns = False elif opt == "-t": set_type(topt.DISP_TIME) elif opt == "-m": set_type(topt.DISP_MIG) elif opt == "-fs": set_type(topt.DISP_ISOLFREE) elif opt == "-ms": set_type(topt.DISP_ISOLMIG) else: sys.exit(usage) elif i == argc - 
1: m = pid_re.search(opt) if m != None and m.group() != "": if m.group(3) != None: f = pid_filter(m.group(3), m.group(3)) else: f = pid_filter(m.group(1), m.group(2)) else: try: comm_re=re.compile(opt) except: sys.stderr.write("invalid regex '%s'" % opt) sys.exit(usage) f = comm_filter(comm_re) chead.add_filter(f)
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/compaction-times.py
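The pair class above lets the per-event counters accumulate element-wise through "+"; a quick standalone check (note that __add__ updates the left operand in place and returns it):

class pair:
    def __init__(self, aval, bval, alabel=None, blabel=None):
        self.alabel, self.blabel = alabel, blabel
        self.aval, self.bval = aval, bval
    def __add__(self, rhs):
        self.aval += rhs.aval
        self.bval += rhs.bval
        return self
    def __str__(self):
        return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)

migrated = pair(0, 0, "moved", "failed")
migrated += pair(31, 1)   # one mm_compaction_migratepages event
migrated += pair(12, 3)   # another
print(migrated)           # -> moved=43 failed=4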
# stackcollapse.py - format perf samples with one line per distinct call stack # SPDX-License-Identifier: GPL-2.0 # # This script's output has two space-separated fields. The first is a semicolon # separated stack including the program name (from the "comm" field) and the # function names from the call stack. The second is a count: # # swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2 # # The file is sorted according to the first field. # # Input may be created and processed using: # # perf record -a -g -F 99 sleep 60 # perf script report stackcollapse > out.stacks-folded # # (perf script record stackcollapse works too). # # Written by Paolo Bonzini <[email protected]> # Based on Brendan Gregg's stackcollapse-perf.pl script. from __future__ import print_function import os import sys from collections import defaultdict from optparse import OptionParser, make_option sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from EventClass import * # command line parsing option_list = [ # formatting options for the bottom entry of the stack make_option("--include-tid", dest="include_tid", action="store_true", default=False, help="include thread id in stack"), make_option("--include-pid", dest="include_pid", action="store_true", default=False, help="include process id in stack"), make_option("--no-comm", dest="include_comm", action="store_false", default=True, help="do not separate stacks according to comm"), make_option("--tidy-java", dest="tidy_java", action="store_true", default=False, help="beautify Java signatures"), make_option("--kernel", dest="annotate_kernel", action="store_true", default=False, help="annotate kernel functions with _[k]") ] parser = OptionParser(option_list=option_list) (opts, args) = parser.parse_args() if len(args) != 0: parser.error("unexpected command line argument") if opts.include_tid and not opts.include_comm: parser.error("requesting tid but not comm is invalid") if opts.include_pid and not opts.include_comm: parser.error("requesting pid but not comm is invalid") # event handlers lines = defaultdict(lambda: 0) def process_event(param_dict): def tidy_function_name(sym, dso): if sym is None: sym = '[unknown]' sym = sym.replace(';', ':') if opts.tidy_java: # the original stackcollapse-perf.pl script gives the # example of converting this: # Lorg/mozilla/javascript/MemberBox;.<init>(Ljava/lang/reflect/Method;)V # to this: # org/mozilla/javascript/MemberBox:.init sym = sym.replace('<', '') sym = sym.replace('>', '') if sym[0] == 'L' and sym.find('/'): sym = sym[1:] try: sym = sym[:sym.index('(')] except ValueError: pass if opts.annotate_kernel and dso == '[kernel.kallsyms]': return sym + '_[k]' else: return sym stack = list() if 'callchain' in param_dict: for entry in param_dict['callchain']: entry.setdefault('sym', dict()) entry['sym'].setdefault('name', None) entry.setdefault('dso', None) stack.append(tidy_function_name(entry['sym']['name'], entry['dso'])) else: param_dict.setdefault('symbol', None) param_dict.setdefault('dso', None) stack.append(tidy_function_name(param_dict['symbol'], param_dict['dso'])) if opts.include_comm: comm = param_dict["comm"].replace(' ', '_') sep = "-" if opts.include_pid: comm = comm + sep + str(param_dict['sample']['pid']) sep = "/" if opts.include_tid: comm = comm + sep + str(param_dict['sample']['tid']) stack.append(comm) stack_string = ';'.join(reversed(stack)) lines[stack_string] = lines[stack_string] + 1 def 
trace_end(): list = sorted(lines) for stack in list: print("%s %d" % (stack, lines[stack]))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/stackcollapse.py
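The --tidy-java transformation above, reproduced standalone so its effect is visible without a perf.data file:

def tidy_java(sym):
    # mirror the steps in tidy_function_name() above
    sym = sym.replace(';', ':').replace('<', '').replace('>', '')
    if sym[0] == 'L' and sym.find('/'):
        sym = sym[1:]
    try:
        sym = sym[:sym.index('(')]
    except ValueError:
        pass
    return sym

print(tidy_java("Lorg/mozilla/javascript/MemberBox;.<init>(Ljava/lang/reflect/Method;)V"))
# -> org/mozilla/javascript/MemberBox:.init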
# SPDX-License-Identifier: GPL-2.0 # intel-pt-events.py: Print Intel PT Events including Power Events and PTWRITE # Copyright (c) 2017-2021, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. from __future__ import division, print_function import os import sys import struct import argparse from libxed import LibXED from ctypes import create_string_buffer, addressof sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import perf_set_itrace_options, \ perf_sample_insn, perf_sample_srccode try: broken_pipe_exception = BrokenPipeError except: broken_pipe_exception = IOError glb_switch_str = {} glb_insn = False glb_disassembler = None glb_src = False glb_source_file_name = None glb_line_number = None glb_dso = None def get_optional_null(perf_dict, field): if field in perf_dict: return perf_dict[field] return "" def get_optional_zero(perf_dict, field): if field in perf_dict: return perf_dict[field] return 0 def get_optional_bytes(perf_dict, field): if field in perf_dict: return perf_dict[field] return bytes() def get_optional(perf_dict, field): if field in perf_dict: return perf_dict[field] return "[unknown]" def get_offset(perf_dict, field): if field in perf_dict: return "+%#x" % perf_dict[field] return "" def trace_begin(): ap = argparse.ArgumentParser(usage = "", add_help = False) ap.add_argument("--insn-trace", action='store_true') ap.add_argument("--src-trace", action='store_true') ap.add_argument("--all-switch-events", action='store_true') global glb_args global glb_insn global glb_src glb_args = ap.parse_args() if glb_args.insn_trace: print("Intel PT Instruction Trace") itrace = "i0nsepwxI" glb_insn = True elif glb_args.src_trace: print("Intel PT Source Trace") itrace = "i0nsepwxI" glb_insn = True glb_src = True else: print("Intel PT Branch Trace, Power Events, Event Trace and PTWRITE") itrace = "bepwxI" global glb_disassembler try: glb_disassembler = LibXED() except: glb_disassembler = None perf_set_itrace_options(perf_script_context, itrace) def trace_end(): print("End") def trace_unhandled(event_name, context, event_fields_dict): print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])) def print_ptwrite(raw_buf): data = struct.unpack_from("<IQ", raw_buf) flags = data[0] payload = data[1] exact_ip = flags & 1 try: s = payload.to_bytes(8, "little").decode("ascii").rstrip("\x00") if not s.isprintable(): s = "" except: s = "" print("IP: %u payload: %#x" % (exact_ip, payload), s, end=' ') def print_cbr(raw_buf): data = struct.unpack_from("<BBBBII", raw_buf) cbr = data[0] f = (data[4] + 500) / 1000 p = ((cbr * 1000 / data[2]) + 5) / 10 print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ') def print_mwait(raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] hints = payload & 0xff extensions = (payload >> 32) & 0x3 print("hints: %#x extensions: %#x" % (hints, extensions), end=' ') def print_pwre(raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] hw = (payload >> 7) & 1 cstate = (payload >> 12) & 0xf subcstate = (payload >> 8) & 0xf print("hw: %u cstate: %u 
sub-cstate: %u" % (hw, cstate, subcstate), end=' ') def print_exstop(raw_buf): data = struct.unpack_from("<I", raw_buf) flags = data[0] exact_ip = flags & 1 print("IP: %u" % (exact_ip), end=' ') def print_pwrx(raw_buf): data = struct.unpack_from("<IQ", raw_buf) payload = data[1] deepest_cstate = payload & 0xf last_cstate = (payload >> 4) & 0xf wake_reason = (payload >> 8) & 0xf print("deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason), end=' ') def print_psb(raw_buf): data = struct.unpack_from("<IQ", raw_buf) offset = data[1] print("offset: %#x" % (offset), end=' ') glb_cfe = ["", "INTR", "IRET", "SMI", "RSM", "SIPI", "INIT", "VMENTRY", "VMEXIT", "VMEXIT_INTR", "SHUTDOWN", "", "UINT", "UIRET"] + [""] * 18 glb_evd = ["", "PFA", "VMXQ", "VMXR"] + [""] * 60 def print_evt(raw_buf): data = struct.unpack_from("<BBH", raw_buf) typ = data[0] & 0x1f ip_flag = (data[0] & 0x80) >> 7 vector = data[1] evd_cnt = data[2] s = glb_cfe[typ] if s: print(" cfe: %s IP: %u vector: %u" % (s, ip_flag, vector), end=' ') else: print(" cfe: %u IP: %u vector: %u" % (typ, ip_flag, vector), end=' ') pos = 4 for i in range(evd_cnt): data = struct.unpack_from("<QQ", raw_buf) et = data[0] & 0x3f s = glb_evd[et] if s: print("%s: %#x" % (s, data[1]), end=' ') else: print("EVD_%u: %#x" % (et, data[1]), end=' ') def print_iflag(raw_buf): data = struct.unpack_from("<IQ", raw_buf) iflag = data[0] & 1 old_iflag = iflag ^ 1 via_branch = data[0] & 2 branch_ip = data[1] if via_branch: s = "via" else: s = "non" print("IFLAG: %u->%u %s branch" % (old_iflag, iflag, s), end=' ') def common_start_str(comm, sample): ts = sample["time"] cpu = sample["cpu"] pid = sample["pid"] tid = sample["tid"] if "machine_pid" in sample: machine_pid = sample["machine_pid"] vcpu = sample["vcpu"] return "VM:%5d VCPU:%03d %16s %5u/%-5u [%03u] %9u.%09u " % (machine_pid, vcpu, comm, pid, tid, cpu, ts / 1000000000, ts %1000000000) else: return "%16s %5u/%-5u [%03u] %9u.%09u " % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000) def print_common_start(comm, sample, name): flags_disp = get_optional_null(sample, "flags_disp") # Unused fields: # period = sample["period"] # phys_addr = sample["phys_addr"] # weight = sample["weight"] # transaction = sample["transaction"] # cpumode = get_optional_zero(sample, "cpumode") print(common_start_str(comm, sample) + "%8s %21s" % (name, flags_disp), end=' ') def print_instructions_start(comm, sample): if "x" in get_optional_null(sample, "flags"): print(common_start_str(comm, sample) + "x", end=' ') else: print(common_start_str(comm, sample), end=' ') def disassem(insn, ip): inst = glb_disassembler.Instruction() glb_disassembler.SetMode(inst, 0) # Assume 64-bit buf = create_string_buffer(64) buf.value = insn return glb_disassembler.DisassembleOne(inst, addressof(buf), len(insn), ip) def print_common_ip(param_dict, sample, symbol, dso): ip = sample["ip"] offs = get_offset(param_dict, "symoff") if "cyc_cnt" in sample: cyc_cnt = sample["cyc_cnt"] insn_cnt = get_optional_zero(sample, "insn_cnt") ipc_str = " IPC: %#.2f (%u/%u)" % (insn_cnt / cyc_cnt, insn_cnt, cyc_cnt) else: ipc_str = "" if glb_insn and glb_disassembler is not None: insn = perf_sample_insn(perf_script_context) if insn and len(insn): cnt, text = disassem(insn, ip) byte_str = ("%x" % ip).rjust(16) if sys.version_info.major >= 3: for k in range(cnt): byte_str += " %02x" % insn[k] else: for k in xrange(cnt): byte_str += " %02x" % ord(insn[k]) print("%-40s %-30s" % (byte_str, text), end=' ') print("%s%s (%s)" % 
(symbol, offs, dso), end=' ') else: print("%16x %s%s (%s)" % (ip, symbol, offs, dso), end=' ') if "addr_correlates_sym" in sample: addr = sample["addr"] dso = get_optional(sample, "addr_dso") symbol = get_optional(sample, "addr_symbol") offs = get_offset(sample, "addr_symoff") print("=> %x %s%s (%s)%s" % (addr, symbol, offs, dso, ipc_str)) else: print(ipc_str) def print_srccode(comm, param_dict, sample, symbol, dso, with_insn): ip = sample["ip"] if symbol == "[unknown]": start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40) else: offs = get_offset(param_dict, "symoff") start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40) if with_insn and glb_insn and glb_disassembler is not None: insn = perf_sample_insn(perf_script_context) if insn and len(insn): cnt, text = disassem(insn, ip) start_str += text.ljust(30) global glb_source_file_name global glb_line_number global glb_dso source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context) if source_file_name: if glb_line_number == line_number and glb_source_file_name == source_file_name: src_str = "" else: if len(source_file_name) > 40: src_file = ("..." + source_file_name[-37:]) + " " else: src_file = source_file_name.ljust(41) if source_line is None: src_str = src_file + str(line_number).rjust(4) + " <source not found>" else: src_str = src_file + str(line_number).rjust(4) + " " + source_line glb_dso = None elif dso == glb_dso: src_str = "" else: src_str = dso glb_dso = dso glb_line_number = line_number glb_source_file_name = source_file_name print(start_str, src_str) def do_process_event(param_dict): event_attr = param_dict["attr"] sample = param_dict["sample"] raw_buf = param_dict["raw_buf"] comm = param_dict["comm"] name = param_dict["ev_name"] # Unused fields: # callchain = param_dict["callchain"] # brstack = param_dict["brstack"] # brstacksym = param_dict["brstacksym"] # Symbol and dso info are not always resolved dso = get_optional(param_dict, "dso") symbol = get_optional(param_dict, "symbol") cpu = sample["cpu"] if cpu in glb_switch_str: print(glb_switch_str[cpu]) del glb_switch_str[cpu] if name[0:12] == "instructions": if glb_src: print_srccode(comm, param_dict, sample, symbol, dso, True) else: print_instructions_start(comm, sample) print_common_ip(param_dict, sample, symbol, dso) elif name[0:8] == "branches": if glb_src: print_srccode(comm, param_dict, sample, symbol, dso, False) else: print_common_start(comm, sample, name) print_common_ip(param_dict, sample, symbol, dso) elif name == "ptwrite": print_common_start(comm, sample, name) print_ptwrite(raw_buf) print_common_ip(param_dict, sample, symbol, dso) elif name == "cbr": print_common_start(comm, sample, name) print_cbr(raw_buf) print_common_ip(param_dict, sample, symbol, dso) elif name == "mwait": print_common_start(comm, sample, name) print_mwait(raw_buf) print_common_ip(param_dict, sample, symbol, dso) elif name == "pwre": print_common_start(comm, sample, name) print_pwre(raw_buf) print_common_ip(param_dict, sample, symbol, dso) elif name == "exstop": print_common_start(comm, sample, name) print_exstop(raw_buf) print_common_ip(param_dict, sample, symbol, dso) elif name == "pwrx": print_common_start(comm, sample, name) print_pwrx(raw_buf) print_common_ip(param_dict, sample, symbol, dso) elif name == "psb": print_common_start(comm, sample, name) print_psb(raw_buf) print_common_ip(param_dict, sample, symbol, dso) elif name == "evt": print_common_start(comm, sample, name) print_evt(raw_buf) print_common_ip(param_dict, 
sample, symbol, dso) elif name == "iflag": print_common_start(comm, sample, name) print_iflag(raw_buf) print_common_ip(param_dict, sample, symbol, dso) else: print_common_start(comm, sample, name) print_common_ip(param_dict, sample, symbol, dso) def process_event(param_dict): try: do_process_event(param_dict) except broken_pipe_exception: # Stop python printing broken pipe errors and traceback sys.stdout = open(os.devnull, 'w') sys.exit(1) def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x): if len(x) >= 2 and x[0]: machine_pid = x[0] vcpu = x[1] else: machine_pid = 0 vcpu = -1 try: if machine_pid: print("VM:%5d VCPU:%03d %16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" % (machine_pid, vcpu, "Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip)) else: print("%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" % ("Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip)) except broken_pipe_exception: # Stop python printing broken pipe errors and traceback sys.stdout = open(os.devnull, 'w') sys.exit(1) def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x): if out: out_str = "Switch out " else: out_str = "Switch In " if out_preempt: preempt_str = "preempt" else: preempt_str = "" if len(x) >= 2 and x[0]: machine_pid = x[0] vcpu = x[1] else: vcpu = None; if machine_pid == -1: machine_str = "" elif vcpu is None: machine_str = "machine PID %d" % machine_pid else: machine_str = "machine PID %d VCPU %d" % (machine_pid, vcpu) switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \ (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str) if glb_args.all_switch_events: print(switch_str) else: global glb_switch_str glb_switch_str[cpu] = switch_str
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/intel-pt-events.py
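Decoding a synthetic PTWRITE record the way print_ptwrite() above does (Python 3; the payload bytes are invented so they spell readable ASCII):

import struct

raw_buf = struct.pack("<IQ", 1, 0x6f6c6c6568)     # flags bit 0 set, payload "hello"
flags, payload = struct.unpack_from("<IQ", raw_buf)
exact_ip = flags & 1
text = payload.to_bytes(8, "little").decode("ascii").rstrip("\x00")
print("IP: %u payload: %#x" % (exact_ip, payload), text)
# -> IP: 1 payload: 0x6f6c6c6568 hello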
# Util.py - Python extension for perf script, miscellaneous utility code # # Copyright (C) 2010 by Tom Zanussi <[email protected]> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. from __future__ import print_function import errno, os FUTEX_WAIT = 0 FUTEX_WAKE = 1 FUTEX_PRIVATE_FLAG = 128 FUTEX_CLOCK_REALTIME = 256 FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) NSECS_PER_SEC = 1000000000 def avg(total, n): return total / n def nsecs(secs, nsecs): return secs * NSECS_PER_SEC + nsecs def nsecs_secs(nsecs): return nsecs / NSECS_PER_SEC def nsecs_nsecs(nsecs): return nsecs % NSECS_PER_SEC def nsecs_str(nsecs): str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)), return str def add_stats(dict, key, value): if key not in dict: dict[key] = (value, value, value, 1) else: min, max, avg, count = dict[key] if value < min: min = value if value > max: max = value avg = (avg + value) / 2 dict[key] = (min, max, avg, count + 1) def clear_term(): print("\x1b[H\x1b[2J") audit_package_warned = False try: import audit machine_to_id = { 'x86_64': audit.MACH_86_64, 'alpha' : audit.MACH_ALPHA, 'ia64' : audit.MACH_IA64, 'ppc' : audit.MACH_PPC, 'ppc64' : audit.MACH_PPC64, 'ppc64le' : audit.MACH_PPC64LE, 's390' : audit.MACH_S390, 's390x' : audit.MACH_S390X, 'i386' : audit.MACH_X86, 'i586' : audit.MACH_X86, 'i686' : audit.MACH_X86, } try: machine_to_id['armeb'] = audit.MACH_ARMEB except: pass machine_id = machine_to_id[os.uname()[4]] except: if not audit_package_warned: audit_package_warned = True print("Install the audit-libs-python package to get syscall names.\n" "For example:\n # apt-get install python-audit (Ubuntu)" "\n # yum install audit-libs-python (Fedora)" "\n etc.\n") def syscall_name(id): try: return audit.audit_syscall_to_name(id, machine_id) except: return str(id) def strerror(nr): try: return errno.errorcode[abs(nr)] except: return "Unknown %d errno" % nr
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
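Exercising add_stats() above, assuming perf's Perf-Trace-Util lib directory is on sys.path (importing Util may print a hint if the audit bindings are missing). Worth noting: the stored average is the smoothed (avg + value) / 2, not an arithmetic mean over all samples:

from Util import add_stats

stats = {}
for value in [100, 200, 600]:
    add_stats(stats, "latency", value)
print(stats["latency"])  # -> (100, 600, 375.0, 3): min, max, smoothed avg, count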
# Core.py - Python extension for perf script, core functions # # Copyright (C) 2010 by Tom Zanussi <[email protected]> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. from collections import defaultdict def autodict(): return defaultdict(autodict) flag_fields = autodict() symbolic_fields = autodict() def define_flag_field(event_name, field_name, delim): flag_fields[event_name][field_name]['delim'] = delim def define_flag_value(event_name, field_name, value, field_str): flag_fields[event_name][field_name]['values'][value] = field_str def define_symbolic_field(event_name, field_name): # nothing to do, really pass def define_symbolic_value(event_name, field_name, value, field_str): symbolic_fields[event_name][field_name]['values'][value] = field_str def flag_str(event_name, field_name, value): string = "" if flag_fields[event_name][field_name]: print_delim = 0 for idx in sorted(flag_fields[event_name][field_name]['values']): if not value and not idx: string += flag_fields[event_name][field_name]['values'][idx] break if idx and (value & idx) == idx: if print_delim and flag_fields[event_name][field_name]['delim']: string += " " + flag_fields[event_name][field_name]['delim'] + " " string += flag_fields[event_name][field_name]['values'][idx] print_delim = 1 value &= ~idx return string def symbol_str(event_name, field_name, value): string = "" if symbolic_fields[event_name][field_name]: for idx in sorted(symbolic_fields[event_name][field_name]['values']): if not value and not idx: string = symbolic_fields[event_name][field_name]['values'][idx] break if (value == idx): string = symbolic_fields[event_name][field_name]['values'][idx] break return string trace_flags = { 0x00: "NONE", \ 0x01: "IRQS_OFF", \ 0x02: "IRQS_NOSUPPORT", \ 0x04: "NEED_RESCHED", \ 0x08: "HARDIRQ", \ 0x10: "SOFTIRQ" } def trace_flag_str(value): string = "" print_delim = 0 for idx in trace_flags: if not value and not idx: string += "NONE" break if idx and (value & idx) == idx: if print_delim: string += " | "; string += trace_flags[idx] print_delim = 1 value &= ~idx return string def taskState(state): states = { 0 : "R", 1 : "S", 2 : "D", 64: "DEAD" } if state not in states: return "Unknown" return states[state] class EventHeaders: def __init__(self, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain): self.cpu = common_cpu self.secs = common_secs self.nsecs = common_nsecs self.pid = common_pid self.comm = common_comm self.callchain = common_callchain def ts(self): return (self.secs * (10 ** 9)) + self.nsecs def ts_format(self): return "%d.%d" % (self.secs, int(self.nsecs / 1000))
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
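A usage sketch for the flag helpers above. The event name and bit values are invented, and the import assumes Core.py is on sys.path (perf normally arranges this for trace scripts).

from Core import define_flag_field, define_flag_value, flag_str, trace_flag_str

# Hypothetical event: register a flags field with a "|" delimiter and two bits.
define_flag_field("sched:my_event", "flags", "|")
define_flag_value("sched:my_event", "flags", 0x1, "LOW")
define_flag_value("sched:my_event", "flags", 0x2, "HIGH")

print(flag_str("sched:my_event", "flags", 0x3))   # -> LOW | HIGH
print(trace_flag_str(0x05))                       # -> IRQS_OFF | NEED_RESCHED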
# EventClass.py
# SPDX-License-Identifier: GPL-2.0
#
# This is a library defining some event type classes, which can be used by
# other scripts to analyze perf samples.
#
# Currently just a few classes are defined as examples: PerfEvent is the
# base class for all perf event samples, PebsEvent is a HW-based Intel x86
# PEBS event, and users can add more SW/HW event classes as needed.

from __future__ import print_function

import struct

# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1         # Basic PEBS event
EVTYPE_PEBS_LL = 2      # PEBS event with load latency info
EVTYPE_IBS = 3

#
# Currently we have no good way to tell the event type other than by the
# size of the raw buffer: a raw PEBS event with load latency data is 176
# bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
    if (len(raw_buf) == 144):
        event = PebsEvent(name, comm, dso, symbol, raw_buf)
    elif (len(raw_buf) == 176):
        event = PebsNHM(name, comm, dso, symbol, raw_buf)
    else:
        event = PerfEvent(name, comm, dso, symbol, raw_buf)

    return event

class PerfEvent(object):
    event_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name
        self.comm = comm
        self.dso = dso
        self.symbol = symbol
        self.raw_buf = raw_buf
        self.ev_type = ev_type
        PerfEvent.event_num += 1

    def show(self):
        print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
              (self.name, self.symbol, self.comm, self.dso))

#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
    pebs_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        tmp_buf = raw_buf[0:80]
        flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ',
                                                                  tmp_buf)
        self.flags = flags
        self.ip = ip
        self.ax = ax
        self.bx = bx
        self.cx = cx
        self.dx = dx
        self.si = si
        self.di = di
        self.bp = bp
        self.sp = sp

        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
        del tmp_buf

#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words right after the PEBS data:
#	Status: records the IA32_PERF_GLOBAL_STATUS register value
#	DLA:    Data Linear Address (EIP)
#	DSE:    Data Source Encoding, where the latency happens, hit or miss
#	        in L1/L2/L3 or IO operations
#	LAT:    the actual latency in cycles
#
class PebsNHM(PebsEvent):
    pebs_nhm_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        tmp_buf = raw_buf[144:176]
        status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
        self.status = status
        self.dla = dla
        self.dse = dse
        self.lat = lat

        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
        del tmp_buf
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
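A sketch that feeds create_event() a synthetic 144-byte record so it takes the PebsEvent path; the register values are made up, and the import assumes the file is on sys.path.

import struct
from EventClass import create_event

regs = struct.pack('QQQQQQQQQQ', *range(10))    # flags, ip, ax..sp
raw = regs + b'\x00' * (144 - len(regs))        # pad to the pure-PEBS size
ev = create_event("cycles:pp", "bash", "/bin/bash", "main", raw)
ev.show()
print("ip=%#x sp=%#x" % (ev.ip, ev.sp))         # -> ip=0x1 sp=0x9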
# SchedGui.py - Python extension for perf script, basic GUI code for
#		traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

try:
    import wx
except ImportError:
    raise ImportError("You need to install the wxpython lib for this script")


class RootFrame(wx.Frame):
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent=None, id=-1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        # Scrollbar units must be integers; plain "/" passed floats under
        # Python 3.
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale,
                                  int(self.width_virtual / self.scroll_scale),
                                  int(self.height_virtual / self.scroll_scale))
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height // 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, int(self.width_virtual), self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        y -= RootFrame.Y_OFFSET
        # Floor division: we want a rectangle index, not a float.
        rect = y // (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height // 2) + 50))

    def on_mouse_down(self, event):
        # GetPositionTuple() is gone in wxPython Phoenix; GetPosition()
        # returns a wx.Point with the same coordinates.
        pos = event.GetPosition()
        (x, y) = (pos.x, pos.y)
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = int(self.us_to_px(x) / self.scroll_scale)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale,
                                  int(self.width_virtual / self.scroll_scale),
                                  int(self.height_virtual / self.scroll_scale),
                                  xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
grace-kernel-grace-kernel-6.1.y
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
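A stub showing the collaborator interface RootFrame expects from its sched_tracer argument; the method names are inferred from the calls above, and the timestamps are hypothetical nanosecond values.

import wx
from SchedGui import RootFrame

class StubTracer:
    def set_root_win(self, win): self.win = win
    def interval(self): return (0, 5 * 10 ** 9)   # 5s trace window
    def nr_rectangles(self): return 3             # three lanes to draw
    def fill_zone(self, start, end): pass         # would call paint_rectangle_zone()
    def mouse_down(self, rect, t): print("lane", rect, "at", t)

app = wx.App(False)
RootFrame(StubTracer(), "sched demo")
app.MainLoop()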
# SPDX-License-Identifier: GPL-2.0+

import os, csv, glob

class CSVCollection(dict):
    """
    This class is a dictionary representation of the collection of sheets
    that exist in a given .ODS file, as exported to one CSV file per sheet.
    """
    # The docstring must be the first statement of the class body to be
    # recognized; it used to sit below these attributes.
    delimiter = ';'
    quotechar = '"'
    source_column_name = 'Sources / Destinations'

    def __init__(self, pattern, skip_commented_lines=True, strip_lines=True):
        super(CSVCollection, self).__init__()
        self.pattern = pattern
        # Comment marker; '\0' can never match a cell's first character, so
        # commented lines are kept when skipping is disabled.
        C = '#' if skip_commented_lines else '\0'
        if strip_lines:
            strip = lambda s: s.strip()
        else:
            strip = lambda s: s

        # load all CSV files
        key = self.source_column_name
        for fname in glob.glob(pattern):
            with open(fname) as F:
                dR = csv.DictReader(F, delimiter=self.delimiter,
                                    quotechar=self.quotechar)
                name = os.path.basename(fname).partition('.')[0]
                D = {
                    r[key]: {f: strip(c) for f, c in r.items()
                             if f != key and f[:1] not in ['', C] and
                                strip(c)[:1] not in ['', C]}
                    for r in dR if r[key][:1] not in ['', C]
                }
                # now, go back through and eliminate all empty dictionaries
                D = {k: v for k, v in D.items() if v}
                self[name] = D
grace-kernel-grace-kernel-6.1.y
drivers/comedi/drivers/ni_routing/tools/csv_collection.py
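A usage sketch: load whatever sheets match a glob pattern and show what was parsed. The directory layout mirrors the one the ni_routing tools use, so it assumes you run from that tools directory.

from csv_collection import CSVCollection

coll = CSVCollection('csv/device_routes/*.csv')
for sheet, table in sorted(coll.items()):
    print("%s: %d sources, e.g. %s" % (sheet, len(table), sorted(table)[:3]))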
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
# This is simply to aid in creating the entries in the order of the value of
# the device-global NI signal/terminal constants defined in comedi.h
import comedi_h
import os, sys, re
from csv_collection import CSVCollection


def c_to_o(filename, prefix='\t\t\t\t\t ni_routing/', suffix=' \\'):
    if not filename.endswith('.c'):
        return ''
    return prefix + filename.rpartition('.c')[0] + '.o' + suffix


def routedict_to_structinit_single(name, D, return_name=False):
    Locals = dict()
    lines = [
        '\t.family = "{}",'.format(name),
        '\t.register_values = {',
        '\t\t/*',
        '\t\t * destination = {',
        '\t\t * source = register value,',
        '\t\t * ...',
        '\t\t * }',
        '\t\t */',
    ]
    if (False):
        # print table with index0:src, index1:dest
        D0 = D  # (src-> dest->reg_value)
        #D1 : destD
    else:
        D0 = dict()
        for src, destD in D.items():
            for dest, val in destD.items():
                D0.setdefault(dest, {})[src] = val

    D0 = sorted(D0.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals))
    for D0_sig, D1_D in D0:
        D1 = sorted(D1_D.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals))

        lines.append('\t\t[B({})] = {{'.format(D0_sig))
        for D1_sig, value in D1:
            if not re.match(r'[VIU]\([^)]*\)', value):
                sys.stderr.write('Invalid register format: {}\n'.format(repr(value)))
                sys.stderr.write(
                    'Register values should be formatted with V(),I(),or U()\n')
                raise RuntimeError('Invalid register values format')
            lines.append('\t\t\t[B({})]\t= {},'.format(D1_sig, value))
        lines.append('\t\t},')
    lines.append('\t},')

    lines = '\n'.join(lines)
    if return_name:
        # Return 'name'; the old code returned the undefined 'N' and would
        # have raised NameError if this branch were ever taken.
        return name, lines
    else:
        return lines


def routedict_to_routelist_single(name, D, indent=1):
    Locals = dict()

    indents = dict(
        I0='\t' * (indent),
        I1='\t' * (indent + 1),
        I2='\t' * (indent + 2),
        I3='\t' * (indent + 3),
        I4='\t' * (indent + 4),
    )

    if (False):
        # data is src -> dest-list
        D0 = D
        keyname = 'src'
        valname = 'dest'
    else:
        # data is dest -> src-list
        keyname = 'dest'
        valname = 'src'
        D0 = dict()
        for src, destD in D.items():
            for dest, val in destD.items():
                D0.setdefault(dest, {})[src] = val

    # Sort by order of device-global names (numerically)
    D0 = sorted(D0.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals))

    lines = [
        '{I0}.device = "{name}",\n'
        '{I0}.routes = (struct ni_route_set[]){{'
        .format(name=name, **indents)
    ]
    for D0_sig, D1_D in D0:
        D1 = [k for k, v in D1_D.items() if v]
        D1.sort(key=lambda i: eval(i, comedi_h.__dict__, Locals))

        lines.append(
            '{I1}{{\n{I2}.{keyname} = {D0_sig},\n'
            '{I2}.{valname} = (int[]){{'
            .format(keyname=keyname, valname=valname, D0_sig=D0_sig, **indents)
        )
        for D1_sig in D1:
            lines.append('{I3}{D1_sig},'.format(D1_sig=D1_sig, **indents))
        lines.append('{I3}0, /* Termination */'.format(**indents))

        lines.append('{I2}}}\n{I1}}},'.format(**indents))

    lines.append('{I1}{{ /* Termination of list */\n{I2}.{keyname} = 0,\n{I1}}},'
                 .format(keyname=keyname, **indents))

    lines.append('{I0}}},'.format(**indents))

    return '\n'.join(lines)


class DeviceRoutes(CSVCollection):
    MKFILE_SEGMENTS = 'device-route.mk'
    SET_C = 'ni_device_routes.c'
    ITEMS_DIR = 'ni_device_routes'
    EXTERN_H = 'all.h'
    OUTPUT_DIR = 'c'

    output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi/drivers/ni_routing/{filename}
 * List of valid routes for specific NI boards.
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2016 Spencer E. Olson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#include "ni_device_routes.h"
#include "{extern_h}"\
""".format(filename=SET_C, extern_h=os.path.join(ITEMS_DIR, EXTERN_H))

    extern_header = """\
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * comedi/drivers/ni_routing/{filename}
 * List of valid routes for specific NI boards.
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2016 Spencer E. Olson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#ifndef _COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H
#define _COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H

#include "../ni_device_routes.h"

{externs}

#endif //_COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H
"""

    single_output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi/drivers/ni_routing/{filename}
 * List of valid routes for specific NI boards.
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2016 Spencer E. Olson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#include "../ni_device_routes.h"
#include "{extern_h}"

struct ni_device_routes {table_name} = {{\
"""

    def __init__(self, pattern='csv/device_routes/*.csv'):
        super(DeviceRoutes, self).__init__(pattern)

    def to_listinit(self):
        chunks = [
            self.output_file_top,
            '',
            'struct ni_device_routes *const ni_device_routes_list[] = {',
        ]
        # put the sheets in lexical order of device numbers then bus
        sheets = sorted(self.items(), key=lambda i: tuple(i[0].split('-')[::-1]))

        externs = []
        objs = [c_to_o(self.SET_C)]

        for sheet, D in sheets:
            S = sheet.lower()
            dev_table_name = 'ni_{}_device_routes'.format(S.replace('-', '_'))
            sheet_filename = os.path.join(self.ITEMS_DIR, '{}.c'.format(S))
            externs.append('extern struct ni_device_routes {};'.format(dev_table_name))

            chunks.append('\t&{},'.format(dev_table_name))

            s_chunks = [
                self.single_output_file_top.format(
                    filename=sheet_filename,
                    table_name=dev_table_name,
                    extern_h=self.EXTERN_H,
                ),
                routedict_to_routelist_single(S, D),
                '};',
            ]

            objs.append(c_to_o(sheet_filename))

            with open(os.path.join(self.OUTPUT_DIR, sheet_filename), 'w') as f:
                f.write('\n'.join(s_chunks))
                f.write('\n')

        with open(os.path.join(self.OUTPUT_DIR, self.MKFILE_SEGMENTS), 'w') as f:
            f.write('# This is the segment that should be included in comedi/drivers/Makefile\n')
            f.write('ni_routing-objs\t\t\t\t+= \\\n')
            f.write('\n'.join(objs))
            f.write('\n')

        EXTERN_H = os.path.join(self.ITEMS_DIR, self.EXTERN_H)
        with open(os.path.join(self.OUTPUT_DIR, EXTERN_H), 'w') as f:
            f.write(self.extern_header.format(
                filename=EXTERN_H, externs='\n'.join(externs)))

        chunks.append('\tNULL,')  # terminate list
        chunks.append('};')
        return '\n'.join(chunks)

    def save(self):
        filename = os.path.join(self.OUTPUT_DIR, self.SET_C)
        try:
            os.makedirs(os.path.join(self.OUTPUT_DIR, self.ITEMS_DIR))
        except OSError:
            # Output directory already exists.
            pass
        with open(filename, 'w') as f:
            f.write(self.to_listinit())
            f.write('\n')


class RouteValues(CSVCollection):
    MKFILE_SEGMENTS = 'route-values.mk'
    SET_C = 'ni_route_values.c'
    ITEMS_DIR = 'ni_route_values'
    EXTERN_H = 'all.h'
    OUTPUT_DIR = 'c'

    output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi/drivers/ni_routing/{filename}
 * Route information for NI boards.
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2016 Spencer E. Olson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * This file includes the tables that are a list of all the values of various
 * signals routes available on NI hardware. In many cases, one does not
 * explicitly make these routes, rather one might indicate that something is
 * used as the source of one particular trigger or another (using
 * *_src=TRIG_EXT).
 *
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#include "ni_route_values.h"
#include "{extern_h}"\
""".format(filename=SET_C, extern_h=os.path.join(ITEMS_DIR, EXTERN_H))

    extern_header = """\
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * comedi/drivers/ni_routing/{filename}
 * List of valid routes for specific NI boards.
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2016 Spencer E. Olson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * The contents of this file are generated using the tools in
 * comedi/drivers/ni_routing/tools
 *
 * Please use those tools to help maintain the contents of this file.
 */

#ifndef _COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H
#define _COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H

#include "../ni_route_values.h"

{externs}

#endif //_COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H
"""

    single_output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi/drivers/ni_routing/{filename}
 * Route information for {sheet} boards.
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2016 Spencer E. Olson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * This file includes a list of all the values of various signals routes
 * available on NI 660x hardware. In many cases, one does not explicitly make
 * these routes, rather one might indicate that something is used as the source
 * of one particular trigger or another (using *_src=TRIG_EXT).
 *
 * The contents of this file can be generated using the tools in
 * comedi/drivers/ni_routing/tools. This file also contains specific notes to
 * this family of devices.
 *
 * Please use those tools to help maintain the contents of this file, but be
 * mindful to not lose the notes already made in this file, since these notes
 * are critical to a complete understanding of the register values of this
 * family.
 */

#include "../ni_route_values.h"
#include "{extern_h}"

const struct family_route_values {table_name} = {{\
"""

    def __init__(self, pattern='csv/route_values/*.csv'):
        super(RouteValues, self).__init__(pattern)

    def to_structinit(self):
        chunks = [
            self.output_file_top,
            '',
            'const struct family_route_values *const ni_all_route_values[] = {',
        ]
        # put the sheets in lexical order for consistency
        sheets = sorted(self.items(), key=lambda i: i[0])

        externs = []
        objs = [c_to_o(self.SET_C)]

        for sheet, D in sheets:
            S = sheet.lower()
            fam_table_name = '{}_route_values'.format(S.replace('-', '_'))
            sheet_filename = os.path.join(self.ITEMS_DIR, '{}.c'.format(S))
            externs.append('extern const struct family_route_values {};'.format(fam_table_name))

            chunks.append('\t&{},'.format(fam_table_name))

            s_chunks = [
                self.single_output_file_top.format(
                    filename=sheet_filename,
                    sheet=sheet.upper(),
                    table_name=fam_table_name,
                    extern_h=self.EXTERN_H,
                ),
                routedict_to_structinit_single(S, D),
                '};',
            ]

            objs.append(c_to_o(sheet_filename))

            with open(os.path.join(self.OUTPUT_DIR, sheet_filename), 'w') as f:
                f.write('\n'.join(s_chunks))
                f.write('\n')

        with open(os.path.join(self.OUTPUT_DIR, self.MKFILE_SEGMENTS), 'w') as f:
            f.write('# This is the segment that should be included in comedi/drivers/Makefile\n')
            f.write('ni_routing-objs\t\t\t\t+= \\\n')
            f.write('\n'.join(objs))
            f.write('\n')

        EXTERN_H = os.path.join(self.ITEMS_DIR, self.EXTERN_H)
        with open(os.path.join(self.OUTPUT_DIR, EXTERN_H), 'w') as f:
            f.write(self.extern_header.format(
                filename=EXTERN_H, externs='\n'.join(externs)))

        chunks.append('\tNULL,')  # terminate list
        chunks.append('};')
        return '\n'.join(chunks)

    def save(self):
        filename = os.path.join(self.OUTPUT_DIR, self.SET_C)
        try:
            os.makedirs(os.path.join(self.OUTPUT_DIR, self.ITEMS_DIR))
        except OSError:
            # Output directory already exists.
            pass
        with open(filename, 'w') as f:
            f.write(self.to_structinit())
            f.write('\n')


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--route_values', action='store_true',
        help='Extract route values from csv/route_values/*.csv')
    parser.add_argument(
        '--device_routes', action='store_true',
        help='Extract route values from csv/device_routes/*.csv')
    args = parser.parse_args()

    KL = list()
    if args.route_values:
        KL.append(RouteValues)
    if args.device_routes:
        KL.append(DeviceRoutes)

    if not KL:
        parser.error('nothing to do...')

    for K in KL:
        doc = K()
        doc.save()
grace-kernel-grace-kernel-6.1.y
drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py
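A library-style equivalent of running the tool with both flags; it assumes you run from the tools directory, since importing the module pulls in the generated comedi_h and reads the csv/ tree next to it, then regenerates the C sources under ./c/.

from convert_csv_to_c import DeviceRoutes, RouteValues

for cls in (DeviceRoutes, RouteValues):
    cls().save()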
# SPDX-License-Identifier: GPL-2.0+
"""
This file helps to extract string names of NI signals as included in comedi.h
between NI_NAMES_BASE and NI_NAMES_BASE+NI_NUM_NAMES.
"""

# This is simply to aid in creating the entries in the order of the value of
# the device-global NI signal/terminal constants defined in comedi.h
import comedi_h


ni_macros = (
    'NI_PFI',
    'TRIGGER_LINE',
    'NI_RTSI_BRD',
    'NI_CtrSource',
    'NI_CtrGate',
    'NI_CtrAux',
    'NI_CtrA',
    'NI_CtrB',
    'NI_CtrZ',
    'NI_CtrArmStartTrigger',
    'NI_CtrInternalOutput',
    'NI_CtrOut',
    'NI_CtrSampleClock',
)

def get_ni_names():
    name_dict = dict()

    # load all the static names; start with those that do not begin with NI_
    name_dict['PXI_Star'] = comedi_h.PXI_Star
    name_dict['PXI_Clk10'] = comedi_h.PXI_Clk10

    # load all macro values
    for fun in ni_macros:
        f = getattr(comedi_h, fun)
        name_dict.update({
            '{}({})'.format(fun, i): f(i) for i in range(1 + f(-1) - f(0))
        })

    # load everything else in ni_common_signal_names enum
    name_dict.update({
        k: v for k, v in comedi_h.__dict__.items()
        if k.startswith('NI_') and (not callable(v)) and
           comedi_h.NI_COUNTER_NAMES_MAX < v < (comedi_h.NI_NAMES_BASE + comedi_h.NI_NUM_NAMES)
    })

    # now create reverse lookup (value -> name)
    val_dict = {v: k for k, v in name_dict.items()}

    return name_dict, val_dict

name_to_value, value_to_name = get_ni_names()
grace-kernel-grace-kernel-6.1.y
drivers/comedi/drivers/ni_routing/tools/ni_names.py
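A round-trip sketch between NI terminal names and their device-global values; it requires the same generated comedi_h module the file above imports at load time.

from ni_names import name_to_value, value_to_name

val = name_to_value['NI_PFI(0)']
assert value_to_name[val] == 'NI_PFI(0)'
print('NI_PFI(0) ->', val)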
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+

from os import path
import os, csv
from itertools import chain

from csv_collection import CSVCollection
from ni_names import value_to_name
import ni_values

CSV_DIR = 'csv'

def iter_src_values(D):
    return D.items()

def iter_src(D):
    for dest in D:
        yield dest, 1

def create_csv(name, D, src_iter):
    # have to change dest->{src:val} to src->{dest:val}
    fieldnames = [value_to_name[i] for i in sorted(D.keys())]
    fieldnames.insert(0, CSVCollection.source_column_name)

    S = dict()
    for dest, srcD in D.items():
        for src, val in src_iter(srcD):
            S.setdefault(src, {})[dest] = val

    S = sorted(S.items(), key=lambda src_destD: src_destD[0])

    csv_fname = path.join(CSV_DIR, name + '.csv')
    with open(csv_fname, 'w') as F_csv:
        dR = csv.DictWriter(F_csv, fieldnames, delimiter=';', quotechar='"')
        dR.writeheader()

        # now change the json back into the csv dictionaries
        rows = [
            dict(chain(
                ((CSVCollection.source_column_name, value_to_name[src]),),
                *(((value_to_name[dest], v),) for dest, v in destD.items())
            ))
            for src, destD in S
        ]
        dR.writerows(rows)

def to_csv():
    for d in ['route_values', 'device_routes']:
        try:
            os.makedirs(path.join(CSV_DIR, d))
        except OSError:
            # Directory already exists.
            pass

    for family, dst_src_map in ni_values.ni_route_values.items():
        create_csv(path.join('route_values', family), dst_src_map, iter_src_values)

    for device, dst_src_map in ni_values.ni_device_routes.items():
        create_csv(path.join('device_routes', device), dst_src_map, iter_src)

if __name__ == '__main__':
    to_csv()
grace-kernel-grace-kernel-6.1.y
drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py
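The reshaping step at the heart of create_csv(), isolated on toy data: invert dest -> {src: val} into src -> {dest: val} before writing rows. The keys and register strings here are invented, not real NI signals.

D = {10: {1: 'V(0)', 2: 'V(1)'}, 11: {1: 'V(2)'}}
S = {}
for dest, srcD in D.items():
    for src, val in srcD.items():
        S.setdefault(src, {})[dest] = val
print(S)   # {1: {10: 'V(0)', 11: 'V(2)'}, 2: {10: 'V(1)'}}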
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+

from os import path
import os, csv

from csv_collection import CSVCollection
from ni_names import value_to_name

CSV_DIR = 'csv'

def to_csv():
    try:
        os.makedirs(CSV_DIR)
    except OSError:
        # Directory already exists.
        pass

    csv_fname = path.join(CSV_DIR, 'blank_route_table.csv')

    fieldnames = [sig for sig_val, sig in sorted(value_to_name.items())]
    fieldnames.insert(0, CSVCollection.source_column_name)

    with open(csv_fname, 'w') as F_csv:
        dR = csv.DictWriter(F_csv, fieldnames, delimiter=';', quotechar='"')
        dR.writeheader()

        for sig in fieldnames[1:]:
            dR.writerow({CSVCollection.source_column_name: sig})

if __name__ == '__main__':
    to_csv()
grace-kernel-grace-kernel-6.1.y
drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py
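A sketch that reads back the blank table the script above writes, confirming the layout (the first column names the source; the remaining columns are destinations); it assumes make_blank_csv.py has already been run in the current directory.

import csv

with open('csv/blank_route_table.csv') as f:
    rows = list(csv.DictReader(f, delimiter=';', quotechar='"'))
print(len(rows), 'source rows,', len(rows[0]) - 1, 'destination columns')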
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys

if len(sys.argv) != 2:
    print("Usage: %s FILE" % sys.argv[0])
    sys.exit(2)

readelf = os.getenv("READELF", "readelf")

start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(r".*rlen=([0-9]+)")

def check_func(func, slots, rlen_sum):
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func:
            func = "[%#x-%#x]" % (start, end)
        print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
    return

num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        check_func(func, slots, rlen_sum)

        func = m.group(1)
        start = int(m.group(2), 16)
        end = int(m.group(3), 16)
        # Integer slot count: ia64 packs three instruction slots into each
        # 16-byte bundle, so use floor division rather than "/".
        slots = 3 * (end - start) // 16
        rlen_sum = 0
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += int(m.group(1))
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print("No errors detected in %u functions." % num_funcs)
else:
    if num_errors > 1:
        err = "errors"
    else:
        err = "error"
    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
    sys.exit(1)
grace-kernel-grace-kernel-6.1.y
arch/ia64/scripts/unwcheck.py
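A sketch of the slot arithmetic the checker relies on: ia64 packs three instruction slots into each 16-byte bundle. The sample readelf line is invented but follows the format the regex above expects.

import re

start = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
m = start.match("<my_func>: [0x4000-0x4040]")
lo, hi = int(m.group(2), 16), int(m.group(3), 16)
print(3 * (hi - lo) // 16)   # -> 12 slots for a 64-byte (4-bundle) function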
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
"""generate_rust_analyzer - Generates the `rust-project.json` file for
`rust-analyzer`.
"""

import argparse
import json
import logging
import pathlib
import sys

def generate_crates(srctree, objtree, sysroot_src):
    # Generate the configuration list.
    cfg = []
    with open(objtree / "include" / "generated" / "rustc_cfg") as fd:
        for line in fd:
            line = line.replace("--cfg=", "")
            line = line.replace("\n", "")
            cfg.append(line)

    # Now fill the crates list -- dependencies need to come first.
    #
    # Avoid O(n^2) iterations by keeping a map of indexes.
    crates = []
    crates_indexes = {}

    def append_crate(display_name, root_module, deps,
                     cfg=[], is_workspace_member=True, is_proc_macro=False):
        crates_indexes[display_name] = len(crates)
        crates.append({
            "display_name": display_name,
            "root_module": str(root_module),
            "is_workspace_member": is_workspace_member,
            "is_proc_macro": is_proc_macro,
            "deps": [{"crate": crates_indexes[dep], "name": dep} for dep in deps],
            "cfg": cfg,
            "edition": "2021",
            "env": {
                "RUST_MODFILE": "This is only for rust-analyzer"
            }
        })

    # First, the ones in `rust/` since they are a bit special.
    append_crate(
        "core",
        sysroot_src / "core" / "src" / "lib.rs",
        [],
        is_workspace_member=False,
    )

    append_crate(
        "compiler_builtins",
        srctree / "rust" / "compiler_builtins.rs",
        [],
    )

    append_crate(
        "alloc",
        srctree / "rust" / "alloc" / "lib.rs",
        ["core", "compiler_builtins"],
    )

    append_crate(
        "macros",
        srctree / "rust" / "macros" / "lib.rs",
        [],
        is_proc_macro=True,
    )
    crates[-1]["proc_macro_dylib_path"] = "rust/libmacros.so"

    append_crate(
        "bindings",
        srctree / "rust" / "bindings" / "lib.rs",
        ["core"],
        cfg=cfg,
    )
    crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True))

    append_crate(
        "kernel",
        srctree / "rust" / "kernel" / "lib.rs",
        ["core", "alloc", "macros", "bindings"],
        cfg=cfg,
    )
    crates[-1]["source"] = {
        "include_dirs": [
            str(srctree / "rust" / "kernel"),
            str(objtree / "rust")
        ],
        "exclude_dirs": [],
    }

    # Then, the rest outside of `rust/`.
    #
    # We explicitly mention the top-level folders we want to cover.
    for folder in ("samples", "drivers"):
        for path in (srctree / folder).rglob("*.rs"):
            logging.info("Checking %s", path)
            name = path.name.replace(".rs", "")

            # Skip those that are not crate roots.
            if f"{name}.o" not in open(path.parent / "Makefile").read():
                continue

            logging.info("Adding %s", name)
            append_crate(
                name,
                path,
                ["core", "alloc", "kernel"],
                cfg=cfg,
            )

    return crates

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', '-v', action='store_true')
    parser.add_argument("srctree", type=pathlib.Path)
    parser.add_argument("objtree", type=pathlib.Path)
    parser.add_argument("sysroot_src", type=pathlib.Path)
    args = parser.parse_args()

    logging.basicConfig(
        format="[%(asctime)s] [%(levelname)s] %(message)s",
        level=logging.INFO if args.verbose else logging.WARNING
    )

    rust_project = {
        "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src),
        "sysroot_src": str(args.sysroot_src),
    }

    json.dump(rust_project, sys.stdout, sort_keys=True, indent=4)

if __name__ == "__main__":
    main()
grace-kernel-grace-kernel-6.1.y
scripts/generate_rust_analyzer.py
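A library-call sketch of the script above; the paths are placeholders, and generate_crates() only succeeds against a configured kernel build tree (it reads objtree/include/generated/rustc_cfg and resolves objtree on disk).

import json, pathlib
from generate_rust_analyzer import generate_crates

tree = pathlib.Path('/path/to/linux')                    # hypothetical
sysroot = pathlib.Path('/path/to/rust/library')          # hypothetical
crates = generate_crates(tree, tree, sysroot)
print(json.dumps(crates[:2], indent=4))                  # first two crate entries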
#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2018-2019 Netronome Systems, Inc. # Copyright (C) 2021 Isovalent, Inc. # In case user attempts to run with Python 2. from __future__ import print_function import argparse import re import sys, os import subprocess helpersDocStart = 'Start of BPF helper function descriptions:' class NoHelperFound(BaseException): pass class NoSyscallCommandFound(BaseException): pass class ParsingError(BaseException): def __init__(self, line='<line not provided>', reader=None): if reader: BaseException.__init__(self, 'Error at file offset %d, parsing line: %s' % (reader.tell(), line)) else: BaseException.__init__(self, 'Error parsing line: %s' % line) class APIElement(object): """ An object representing the description of an aspect of the eBPF API. @proto: prototype of the API symbol @desc: textual description of the symbol @ret: (optional) description of any associated return value """ def __init__(self, proto='', desc='', ret=''): self.proto = proto self.desc = desc self.ret = ret class Helper(APIElement): """ An object representing the description of an eBPF helper function. @proto: function prototype of the helper function @desc: textual description of the helper function @ret: description of the return value of the helper function """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.enum_val = None def proto_break_down(self): """ Break down helper function protocol into smaller chunks: return type, name, distincts arguments. """ arg_re = re.compile('((\w+ )*?(\w+|...))( (\**)(\w+))?$') res = {} proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$') capture = proto_re.match(self.proto) res['ret_type'] = capture.group(1) res['ret_star'] = capture.group(2) res['name'] = capture.group(3) res['args'] = [] args = capture.group(4).split(', ') for a in args: capture = arg_re.match(a) res['args'].append({ 'type' : capture.group(1), 'star' : capture.group(5), 'name' : capture.group(6) }) return res class HeaderParser(object): """ An object used to parse a file in order to extract the documentation of a list of eBPF helper functions. All the helpers that can be retrieved are stored as Helper object, in the self.helpers() array. @filename: name of file to parse, usually include/uapi/linux/bpf.h in the kernel tree """ def __init__(self, filename): self.reader = open(filename, 'r') self.line = '' self.helpers = [] self.commands = [] self.desc_unique_helpers = set() self.define_unique_helpers = [] self.helper_enum_vals = {} self.desc_syscalls = [] self.enum_syscalls = [] def parse_element(self): proto = self.parse_symbol() desc = self.parse_desc(proto) ret = self.parse_ret(proto) return APIElement(proto=proto, desc=desc, ret=ret) def parse_helper(self): proto = self.parse_proto() desc = self.parse_desc(proto) ret = self.parse_ret(proto) return Helper(proto=proto, desc=desc, ret=ret) def parse_symbol(self): p = re.compile(' \* ?(BPF\w+)$') capture = p.match(self.line) if not capture: raise NoSyscallCommandFound end_re = re.compile(' \* ?NOTES$') end = end_re.match(self.line) if end: raise NoSyscallCommandFound self.line = self.reader.readline() return capture.group(1) def parse_proto(self): # Argument can be of shape: # - "void" # - "type name" # - "type *name" # - Same as above, with "const" and/or "struct" in front of type # - "..." (undefined number of arguments, for bpf_trace_printk()) # There is at least one term ("void"), and at most five arguments. 
p = re.compile(' \* ?((.+) \**\w+\((((const )?(struct )?(\w+|\.\.\.)( \**\w+)?)(, )?){1,5}\))$') capture = p.match(self.line) if not capture: raise NoHelperFound self.line = self.reader.readline() return capture.group(1) def parse_desc(self, proto): p = re.compile(' \* ?(?:\t| {5,8})Description$') capture = p.match(self.line) if not capture: raise Exception("No description section found for " + proto) # Description can be several lines, some of them possibly empty, and it # stops when another subsection title is met. desc = '' desc_present = False while True: self.line = self.reader.readline() if self.line == ' *\n': desc += '\n' else: p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)') capture = p.match(self.line) if capture: desc_present = True desc += capture.group(1) + '\n' else: break if not desc_present: raise Exception("No description found for " + proto) return desc def parse_ret(self, proto): p = re.compile(' \* ?(?:\t| {5,8})Return$') capture = p.match(self.line) if not capture: raise Exception("No return section found for " + proto) # Return value description can be several lines, some of them possibly # empty, and it stops when another subsection title is met. ret = '' ret_present = False while True: self.line = self.reader.readline() if self.line == ' *\n': ret += '\n' else: p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)') capture = p.match(self.line) if capture: ret_present = True ret += capture.group(1) + '\n' else: break if not ret_present: raise Exception("No return found for " + proto) return ret def seek_to(self, target, help_message, discard_lines = 1): self.reader.seek(0) offset = self.reader.read().find(target) if offset == -1: raise Exception(help_message) self.reader.seek(offset) self.reader.readline() for _ in range(discard_lines): self.reader.readline() self.line = self.reader.readline() def parse_desc_syscall(self): self.seek_to('* DOC: eBPF Syscall Commands', 'Could not find start of eBPF syscall descriptions list') while True: try: command = self.parse_element() self.commands.append(command) self.desc_syscalls.append(command.proto) except NoSyscallCommandFound: break def parse_enum_syscall(self): self.seek_to('enum bpf_cmd {', 'Could not find start of bpf_cmd enum', 0) # Searches for either one or more BPF\w+ enums bpf_p = re.compile('\s*(BPF\w+)+') # Searches for an enum entry assigned to another entry, # for e.g. BPF_PROG_RUN = BPF_PROG_TEST_RUN, which is # not documented hence should be skipped in check to # determine if the right number of syscalls are documented assign_p = re.compile('\s*(BPF\w+)\s*=\s*(BPF\w+)') bpf_cmd_str = '' while True: capture = assign_p.match(self.line) if capture: # Skip line if an enum entry is assigned to another entry self.line = self.reader.readline() continue capture = bpf_p.match(self.line) if capture: bpf_cmd_str += self.line else: break self.line = self.reader.readline() # Find the number of occurences of BPF\w+ self.enum_syscalls = re.findall('(BPF\w+)+', bpf_cmd_str) def parse_desc_helpers(self): self.seek_to(helpersDocStart, 'Could not find start of eBPF helper descriptions list') while True: try: helper = self.parse_helper() self.helpers.append(helper) proto = helper.proto_break_down() self.desc_unique_helpers.add(proto['name']) except NoHelperFound: break def parse_define_helpers(self): # Parse FN(...) in #define __BPF_FUNC_MAPPER to compare later with the # number of unique function names present in description and use the # correct enumeration value. # Note: seek_to(..) 
discards the first line below the target search text, # resulting in FN(unspec) being skipped and not added to self.define_unique_helpers. self.seek_to('#define __BPF_FUNC_MAPPER(FN)', 'Could not find start of eBPF helper definition list') # Searches for one FN(\w+) define or a backslash for newline p = re.compile('\s*FN\((\w+)\)|\\\\') fn_defines_str = '' i = 1 # 'unspec' is skipped as mentioned above while True: capture = p.match(self.line) if capture: fn_defines_str += self.line self.helper_enum_vals[capture.expand(r'bpf_\1')] = i i += 1 else: break self.line = self.reader.readline() # Find the number of occurences of FN(\w+) self.define_unique_helpers = re.findall('FN\(\w+\)', fn_defines_str) def assign_helper_values(self): seen_helpers = set() for helper in self.helpers: proto = helper.proto_break_down() name = proto['name'] try: enum_val = self.helper_enum_vals[name] except KeyError: raise Exception("Helper %s is missing from enum bpf_func_id" % name) # Enforce current practice of having the descriptions ordered # by enum value. seen_helpers.add(name) desc_val = len(seen_helpers) if desc_val != enum_val: raise Exception("Helper %s comment order (#%d) must be aligned with its position (#%d) in enum bpf_func_id" % (name, desc_val, enum_val)) helper.enum_val = enum_val def run(self): self.parse_desc_syscall() self.parse_enum_syscall() self.parse_desc_helpers() self.parse_define_helpers() self.assign_helper_values() self.reader.close() ############################################################################### class Printer(object): """ A generic class for printers. Printers should be created with an array of Helper objects, and implement a way to print them in the desired fashion. @parser: A HeaderParser with objects to print to standard output """ def __init__(self, parser): self.parser = parser self.elements = [] def print_header(self): pass def print_footer(self): pass def print_one(self, helper): pass def print_all(self): self.print_header() for elem in self.elements: self.print_one(elem) self.print_footer() def elem_number_check(self, desc_unique_elem, define_unique_elem, type, instance): """ Checks the number of helpers/syscalls documented within the header file description with those defined as part of enum/macro and raise an Exception if they don't match. """ nr_desc_unique_elem = len(desc_unique_elem) nr_define_unique_elem = len(define_unique_elem) if nr_desc_unique_elem != nr_define_unique_elem: exception_msg = ''' The number of unique %s in description (%d) doesn\'t match the number of unique %s defined in %s (%d) ''' % (type, nr_desc_unique_elem, type, instance, nr_define_unique_elem) if nr_desc_unique_elem < nr_define_unique_elem: # Function description is parsed until no helper is found (which can be due to # misformatting). Hence, only print the first missing/misformatted helper/enum. exception_msg += ''' The description for %s is not present or formatted correctly. ''' % (define_unique_elem[nr_desc_unique_elem]) raise Exception(exception_msg) class PrinterRST(Printer): """ A generic class for printers that print ReStructured Text. Printers should be created with a HeaderParser object, and implement a way to print API elements in the desired fashion. @parser: A HeaderParser with objects to print to standard output """ def __init__(self, parser): self.parser = parser def print_license(self): license = '''\ .. Copyright (C) All BPF authors and contributors from 2014 to present. .. See git log include/uapi/linux/bpf.h in kernel tree for details. .. .. 
SPDX-License-Identifier: Linux-man-pages-copyleft .. .. Please do not edit this file. It was generated from the documentation .. located in file include/uapi/linux/bpf.h of the Linux kernel sources .. (helpers description), and from scripts/bpf_doc.py in the same .. repository (header and footer). ''' print(license) def print_elem(self, elem): if (elem.desc): print('\tDescription') # Do not strip all newline characters: formatted code at the end of # a section must be followed by a blank line. for line in re.sub('\n$', '', elem.desc, count=1).split('\n'): print('{}{}'.format('\t\t' if line else '', line)) if (elem.ret): print('\tReturn') for line in elem.ret.rstrip().split('\n'): print('{}{}'.format('\t\t' if line else '', line)) print('') def get_kernel_version(self): try: version = subprocess.run(['git', 'describe'], cwd=linuxRoot, capture_output=True, check=True) version = version.stdout.decode().rstrip() except: try: version = subprocess.run(['make', 'kernelversion'], cwd=linuxRoot, capture_output=True, check=True) version = version.stdout.decode().rstrip() except: return 'Linux' return 'Linux {version}'.format(version=version) def get_last_doc_update(self, delimiter): try: cmd = ['git', 'log', '-1', '--pretty=format:%cs', '--no-patch', '-L', '/{}/,/\*\//:include/uapi/linux/bpf.h'.format(delimiter)] date = subprocess.run(cmd, cwd=linuxRoot, capture_output=True, check=True) return date.stdout.decode().rstrip() except: return '' class PrinterHelpersRST(PrinterRST): """ A printer for dumping collected information about helpers as a ReStructured Text page compatible with the rst2man program, which can be used to generate a manual page for the helpers. @parser: A HeaderParser with Helper objects to print to standard output """ def __init__(self, parser): self.elements = parser.helpers self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '__BPF_FUNC_MAPPER') def print_header(self): header = '''\ =========== BPF-HELPERS =========== ------------------------------------------------------------------------------- list of eBPF helper functions ------------------------------------------------------------------------------- :Manual section: 7 :Version: {version} {date_field}{date} DESCRIPTION =========== The extended Berkeley Packet Filter (eBPF) subsystem consists in programs written in a pseudo-assembly language, then attached to one of the several kernel hooks and run in reaction of specific events. This framework differs from the older, "classic" BPF (or "cBPF") in several aspects, one of them being the ability to call special functions (or "helpers") from within a program. These functions are restricted to a white-list of helpers defined in the kernel. These helpers are used by eBPF programs to interact with the system, or with the context in which they work. For instance, they can be used to print debugging messages, to get the time since the system was booted, to interact with eBPF maps, or to manipulate network packets. Since there are several eBPF program types, and that they do not run in the same context, each program type can only call a subset of those helpers. Due to eBPF conventions, a helper can not have more than five arguments. Internally, eBPF programs call directly into the compiled helper functions without requiring any foreign-function interface. As a result, calling helpers introduces no overhead, thus offering excellent performance. This document is an attempt to list and document the helpers available to eBPF developers. 
They are sorted by chronological order (the oldest helpers in the kernel at the top). HELPERS ======= ''' kernelVersion = self.get_kernel_version() lastUpdate = self.get_last_doc_update(helpersDocStart) PrinterRST.print_license(self) print(header.format(version=kernelVersion, date_field = ':Date: ' if lastUpdate else '', date=lastUpdate)) def print_footer(self): footer = ''' EXAMPLES ======== Example usage for most of the eBPF helpers listed in this manual page are available within the Linux kernel sources, at the following locations: * *samples/bpf/* * *tools/testing/selftests/bpf/* LICENSE ======= eBPF programs can have an associated license, passed along with the bytecode instructions to the kernel when the programs are loaded. The format for that string is identical to the one in use for kernel modules (Dual licenses, such as "Dual BSD/GPL", may be used). Some helper functions are only accessible to programs that are compatible with the GNU Privacy License (GPL). In order to use such helpers, the eBPF program must be loaded with the correct license string passed (via **attr**) to the **bpf**\ () system call, and this generally translates into the C source code of the program containing a line similar to the following: :: char ____license[] __attribute__((section("license"), used)) = "GPL"; IMPLEMENTATION ============== This manual page is an effort to document the existing eBPF helper functions. But as of this writing, the BPF sub-system is under heavy development. New eBPF program or map types are added, along with new helper functions. Some helpers are occasionally made available for additional program types. So in spite of the efforts of the community, this page might not be up-to-date. If you want to check by yourself what helper functions exist in your kernel, or what types of programs they can support, here are some files among the kernel tree that you may be interested in: * *include/uapi/linux/bpf.h* is the main BPF header. It contains the full list of all helper functions, as well as many other BPF definitions including most of the flags, structs or constants used by the helpers. * *net/core/filter.c* contains the definition of most network-related helper functions, and the list of program types from which they can be used. * *kernel/trace/bpf_trace.c* is the equivalent for most tracing program-related helpers. * *kernel/bpf/verifier.c* contains the functions used to check that valid types of eBPF maps are used with a given helper function. * *kernel/bpf/* directory contains other files in which additional helpers are defined (for cgroups, sockmaps, etc.). * The bpftool utility can be used to probe the availability of helper functions on the system (as well as supported program and map types, and a number of other parameters). To do so, run **bpftool feature probe** (see **bpftool-feature**\ (8) for details). Add the **unprivileged** keyword to list features available to unprivileged users. Compatibility between helper functions and program types can generally be found in the files where helper functions are defined. Look for the **struct bpf_func_proto** objects and for functions returning them: these functions contain a list of helpers that a given program type can call. Note that the **default:** label of the **switch ... case** used to filter helpers can call other functions, themselves allowing access to additional helpers. The requirement for GPL license is also in those **struct bpf_func_proto**. 
Compatibility between helper functions and map types can be found in the **check_map_func_compatibility**\ () function in file *kernel/bpf/verifier.c*. Helper functions that invalidate the checks on **data** and **data_end** pointers for network processing are listed in function **bpf_helper_changes_pkt_data**\ () in file *net/core/filter.c*. SEE ALSO ======== **bpf**\ (2), **bpftool**\ (8), **cgroups**\ (7), **ip**\ (8), **perf_event_open**\ (2), **sendmsg**\ (2), **socket**\ (7), **tc-bpf**\ (8)''' print(footer) def print_proto(self, helper): """ Format function protocol with bold and italics markers. This makes RST file less readable, but gives nice results in the manual page. """ proto = helper.proto_break_down() print('**%s %s%s(' % (proto['ret_type'], proto['ret_star'].replace('*', '\\*'), proto['name']), end='') comma = '' for a in proto['args']: one_arg = '{}{}'.format(comma, a['type']) if a['name']: if a['star']: one_arg += ' {}**\ '.format(a['star'].replace('*', '\\*')) else: one_arg += '** ' one_arg += '*{}*\\ **'.format(a['name']) comma = ', ' print(one_arg, end='') print(')**') def print_one(self, helper): self.print_proto(helper) self.print_elem(helper) class PrinterSyscallRST(PrinterRST): """ A printer for dumping collected information about the syscall API as a ReStructured Text page compatible with the rst2man program, which can be used to generate a manual page for the syscall. @parser: A HeaderParser with APIElement objects to print to standard output """ def __init__(self, parser): self.elements = parser.commands self.elem_number_check(parser.desc_syscalls, parser.enum_syscalls, 'syscall', 'bpf_cmd') def print_header(self): header = '''\ === bpf === ------------------------------------------------------------------------------- Perform a command on an extended BPF object ------------------------------------------------------------------------------- :Manual section: 2 COMMANDS ======== ''' PrinterRST.print_license(self) print(header) def print_one(self, command): print('**%s**' % (command.proto)) self.print_elem(command) class PrinterHelpers(Printer): """ A printer for dumping collected information about helpers as C header to be included from BPF program. 
@parser: A HeaderParser with Helper objects to print to standard output """ def __init__(self, parser): self.elements = parser.helpers self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '__BPF_FUNC_MAPPER') type_fwds = [ 'struct bpf_fib_lookup', 'struct bpf_sk_lookup', 'struct bpf_perf_event_data', 'struct bpf_perf_event_value', 'struct bpf_pidns_info', 'struct bpf_redir_neigh', 'struct bpf_sock', 'struct bpf_sock_addr', 'struct bpf_sock_ops', 'struct bpf_sock_tuple', 'struct bpf_spin_lock', 'struct bpf_sysctl', 'struct bpf_tcp_sock', 'struct bpf_tunnel_key', 'struct bpf_xfrm_state', 'struct linux_binprm', 'struct pt_regs', 'struct sk_reuseport_md', 'struct sockaddr', 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', 'struct unix_sock', 'struct task_struct', 'struct __sk_buff', 'struct sk_msg_md', 'struct xdp_md', 'struct path', 'struct btf_ptr', 'struct inode', 'struct socket', 'struct file', 'struct bpf_timer', 'struct mptcp_sock', 'struct bpf_dynptr', 'struct iphdr', 'struct ipv6hdr', ] known_types = { '...', 'void', 'const void', 'char', 'const char', 'int', 'long', 'unsigned long', '__be16', '__be32', '__wsum', 'struct bpf_fib_lookup', 'struct bpf_perf_event_data', 'struct bpf_perf_event_value', 'struct bpf_pidns_info', 'struct bpf_redir_neigh', 'struct bpf_sk_lookup', 'struct bpf_sock', 'struct bpf_sock_addr', 'struct bpf_sock_ops', 'struct bpf_sock_tuple', 'struct bpf_spin_lock', 'struct bpf_sysctl', 'struct bpf_tcp_sock', 'struct bpf_tunnel_key', 'struct bpf_xfrm_state', 'struct linux_binprm', 'struct pt_regs', 'struct sk_reuseport_md', 'struct sockaddr', 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', 'struct unix_sock', 'struct task_struct', 'struct path', 'struct btf_ptr', 'struct inode', 'struct socket', 'struct file', 'struct bpf_timer', 'struct mptcp_sock', 'struct bpf_dynptr', 'struct iphdr', 'struct ipv6hdr', } mapped_types = { 'u8': '__u8', 'u16': '__u16', 'u32': '__u32', 'u64': '__u64', 's8': '__s8', 's16': '__s16', 's32': '__s32', 's64': '__s64', 'size_t': 'unsigned long', 'struct bpf_map': 'void', 'struct sk_buff': 'struct __sk_buff', 'const struct sk_buff': 'const struct __sk_buff', 'struct sk_msg_buff': 'struct sk_msg_md', 'struct xdp_buff': 'struct xdp_md', } # Helpers overloaded for different context types. overloaded_helpers = [ 'bpf_get_socket_cookie', 'bpf_sk_assign', ] def print_header(self): header = '''\ /* This is auto-generated file. See bpf_doc.py for details. */ /* Forward declarations of BPF structs */''' print(header) for fwd in self.type_fwds: print('%s;' % fwd) print('') def print_footer(self): footer = '' print(footer) def map_type(self, t): if t in self.known_types: return t if t in self.mapped_types: return self.mapped_types[t] print("Unrecognized type '%s', please add it to known types!" % t, file=sys.stderr) sys.exit(1) seen_helpers = set() def print_one(self, helper): proto = helper.proto_break_down() if proto['name'] in self.seen_helpers: return self.seen_helpers.add(proto['name']) print('/*') print(" * %s" % proto['name']) print(" *") if (helper.desc): # Do not strip all newline characters: formatted code at the end of # a section must be followed by a blank line. 
for line in re.sub('\n$', '', helper.desc, count=1).split('\n'): print(' *{}{}'.format(' \t' if line else '', line)) if (helper.ret): print(' *') print(' * Returns') for line in helper.ret.rstrip().split('\n'): print(' *{}{}'.format(' \t' if line else '', line)) print(' */') print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']), proto['ret_star'], proto['name']), end='') comma = '' for i, a in enumerate(proto['args']): t = a['type'] n = a['name'] if proto['name'] in self.overloaded_helpers and i == 0: t = 'void' n = 'ctx' one_arg = '{}{}'.format(comma, self.map_type(t)) if n: if a['star']: one_arg += ' {}'.format(a['star']) else: one_arg += ' ' one_arg += '{}'.format(n) comma = ', ' print(one_arg, end='') print(') = (void *) %d;' % helper.enum_val) print('') ############################################################################### # If script is launched from scripts/ from kernel tree and can access # ../include/uapi/linux/bpf.h, use it as a default name for the file to parse, # otherwise the --filename argument will be required from the command line. script = os.path.abspath(sys.argv[0]) linuxRoot = os.path.dirname(os.path.dirname(script)) bpfh = os.path.join(linuxRoot, 'include/uapi/linux/bpf.h') printers = { 'helpers': PrinterHelpersRST, 'syscall': PrinterSyscallRST, } argParser = argparse.ArgumentParser(description=""" Parse eBPF header file and generate documentation for the eBPF API. The RST-formatted output produced can be turned into a manual page with the rst2man utility. """) argParser.add_argument('--header', action='store_true', help='generate C header file') if (os.path.isfile(bpfh)): argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h', default=bpfh) else: argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h') argParser.add_argument('target', nargs='?', default='helpers', choices=printers.keys(), help='eBPF API target') args = argParser.parse_args() # Parse file. headerParser = HeaderParser(args.filename) headerParser.run() # Print formatted output to standard output. if args.header: if args.target != 'helpers': raise NotImplementedError('Only helpers header generation is supported') printer = PrinterHelpers(headerParser) else: printer = printers[args.target](headerParser) printer.print_all()
grace-kernel-grace-kernel-6.1.y
scripts/bpf_doc.py
#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-only """Find Kconfig symbols that are referenced but not defined.""" # (c) 2014-2017 Valentin Rothberg <[email protected]> # (c) 2014 Stefan Hengelein <[email protected]> # import argparse import difflib import os import re import signal import subprocess import sys from multiprocessing import Pool, cpu_count # regex expressions OPERATORS = r"&|\(|\)|\||\!" SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}" DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*" EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+" DEFAULT = r"default\s+.*?(?:if\s.+){,1}" STMT = r"^\s*(?:if|select|imply|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")" # regex objects REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$") REGEX_SYMBOL = re.compile(r'(?!\B)' + SYMBOL + r'(?!\B)') REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL) REGEX_KCONFIG_DEF = re.compile(DEF) REGEX_KCONFIG_EXPR = re.compile(EXPR) REGEX_KCONFIG_STMT = re.compile(STMT) REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$") REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") REGEX_QUOTES = re.compile("(\"(.*?)\")") def parse_options(): """The user interface of this module.""" usage = "Run this tool to detect Kconfig symbols that are referenced but " \ "not defined in Kconfig. If no option is specified, " \ "checkkconfigsymbols defaults to check your current tree. " \ "Please note that specifying commits will 'git reset --hard\' " \ "your current tree! You may save uncommitted changes to avoid " \ "losing data." parser = argparse.ArgumentParser(description=usage) parser.add_argument('-c', '--commit', dest='commit', action='store', default="", help="check if the specified commit (hash) introduces " "undefined Kconfig symbols") parser.add_argument('-d', '--diff', dest='diff', action='store', default="", help="diff undefined symbols between two commits " "(e.g., -d commmit1..commit2)") parser.add_argument('-f', '--find', dest='find', action='store_true', default=False, help="find and show commits that may cause symbols to be " "missing (required to run with --diff)") parser.add_argument('-i', '--ignore', dest='ignore', action='store', default="", help="ignore files matching this Python regex " "(e.g., -i '.*defconfig')") parser.add_argument('-s', '--sim', dest='sim', action='store', default="", help="print a list of max. 10 string-similar symbols") parser.add_argument('--force', dest='force', action='store_true', default=False, help="reset current Git tree even when it's dirty") parser.add_argument('--no-color', dest='color', action='store_false', default=True, help="don't print colored output (default when not " "outputting to a terminal)") args = parser.parse_args() if args.commit and args.diff: sys.exit("Please specify only one option at once.") if args.diff and not re.match(r"^[\w\-\.\^]+\.\.[\w\-\.\^]+$", args.diff): sys.exit("Please specify valid input in the following format: " "\'commit1..commit2\'") if args.commit or args.diff: if not args.force and tree_is_dirty(): sys.exit("The current Git tree is dirty (see 'git status'). " "Running this script may\ndelete important data since it " "calls 'git reset --hard' for some performance\nreasons. 
" " Please run this script in a clean Git tree or pass " "'--force' if you\nwant to ignore this warning and " "continue.") if args.commit: if args.commit.startswith('HEAD'): sys.exit("The --commit option can't use the HEAD ref") args.find = False if args.ignore: try: re.match(args.ignore, "this/is/just/a/test.c") except: sys.exit("Please specify a valid Python regex.") return args def print_undefined_symbols(): """Main function of this module.""" args = parse_options() global COLOR COLOR = args.color and sys.stdout.isatty() if args.sim and not args.commit and not args.diff: sims = find_sims(args.sim, args.ignore) if sims: print("%s: %s" % (yel("Similar symbols"), ', '.join(sims))) else: print("%s: no similar symbols found" % yel("Similar symbols")) sys.exit(0) # dictionary of (un)defined symbols defined = {} undefined = {} if args.commit or args.diff: head = get_head() # get commit range commit_a = None commit_b = None if args.commit: commit_a = args.commit + "~" commit_b = args.commit elif args.diff: split = args.diff.split("..") commit_a = split[0] commit_b = split[1] undefined_a = {} undefined_b = {} # get undefined items before the commit reset(commit_a) undefined_a, _ = check_symbols(args.ignore) # get undefined items for the commit reset(commit_b) undefined_b, defined = check_symbols(args.ignore) # report cases that are present for the commit but not before for symbol in sorted(undefined_b): # symbol has not been undefined before if symbol not in undefined_a: files = sorted(undefined_b.get(symbol)) undefined[symbol] = files # check if there are new files that reference the undefined symbol else: files = sorted(undefined_b.get(symbol) - undefined_a.get(symbol)) if files: undefined[symbol] = files # reset to head reset(head) # default to check the entire tree else: undefined, defined = check_symbols(args.ignore) # now print the output for symbol in sorted(undefined): print(red(symbol)) files = sorted(undefined.get(symbol)) print("%s: %s" % (yel("Referencing files"), ", ".join(files))) sims = find_sims(symbol, args.ignore, defined) sims_out = yel("Similar symbols") if sims: print("%s: %s" % (sims_out, ', '.join(sims))) else: print("%s: %s" % (sims_out, "no similar symbols found")) if args.find: print("%s:" % yel("Commits changing symbol")) commits = find_commits(symbol, args.diff) if commits: for commit in commits: commit = commit.split(" ", 1) print("\t- %s (\"%s\")" % (yel(commit[0]), commit[1])) else: print("\t- no commit found") print() # new line def reset(commit): """Reset current git tree to %commit.""" execute(["git", "reset", "--hard", commit]) def yel(string): """ Color %string yellow. """ return "\033[33m%s\033[0m" % string if COLOR else string def red(string): """ Color %string red. """ return "\033[31m%s\033[0m" % string if COLOR else string def execute(cmd): """Execute %cmd and return stdout. 
    Exit in case of error."""
    try:
        stdout = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=False)
        stdout = stdout.decode(errors='replace')
    except subprocess.CalledProcessError as fail:
        sys.exit(fail)
    return stdout


def find_commits(symbol, diff):
    """Find commits changing %symbol in the given range of %diff."""
    commits = execute(["git", "log", "--pretty=oneline",
                       "--abbrev-commit", "-G",
                       symbol, diff])
    return [x for x in commits.split("\n") if x]


def tree_is_dirty():
    """Return true if the current working tree is dirty (i.e., if any file has
    been added, deleted, modified, renamed or copied but not committed)."""
    stdout = execute(["git", "status", "--porcelain"])
    # iterate over lines, not characters, so the status letters are matched
    # only in the two-column status field
    for line in stdout.splitlines():
        if re.findall(r"[URMADC]{1}", line[:2]):
            return True
    return False


def get_head():
    """Return commit hash of current HEAD."""
    stdout = execute(["git", "rev-parse", "HEAD"])
    return stdout.strip('\n')


def partition(lst, size):
    """Partition list @lst into evenly sized lists of size @size."""
    return [lst[i::size] for i in range(size)]


def init_worker():
    """Set signal handler to ignore SIGINT."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)


def find_sims(symbol, ignore, defined=None):
    """Return a list of max. ten Kconfig symbols that are string-similar to
    @symbol."""
    if defined:
        return difflib.get_close_matches(symbol, set(defined), 10)
    defined = []  # avoid a shared mutable default argument

    pool = Pool(cpu_count(), init_worker)
    kfiles = []
    for gitfile in get_files():
        if REGEX_FILE_KCONFIG.match(gitfile):
            kfiles.append(gitfile)

    arglist = []
    for part in partition(kfiles, cpu_count()):
        arglist.append((part, ignore))

    for res in pool.map(parse_kconfig_files, arglist):
        defined.extend(res[0])

    return difflib.get_close_matches(symbol, set(defined), 10)


def get_files():
    """Return a list of all files in the current git directory."""
    # use 'git ls-files' to get the worklist
    stdout = execute(["git", "ls-files"])
    if len(stdout) > 0 and stdout[-1] == "\n":
        stdout = stdout[:-1]

    files = []
    for gitfile in stdout.rsplit("\n"):
        if ".git" in gitfile or "ChangeLog" in gitfile or \
                ".log" in gitfile or os.path.isdir(gitfile) or \
                gitfile.startswith("tools/"):
            continue
        files.append(gitfile)
    return files


def check_symbols(ignore):
    """Find undefined Kconfig symbols and return a dict with the symbol as key
    and a list of referencing files as value. Files matching %ignore are not
    checked for undefined symbols."""
    pool = Pool(cpu_count(), init_worker)
    try:
        return check_symbols_helper(pool, ignore)
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
        sys.exit(1)


def check_symbols_helper(pool, ignore):
    """Helper method for check_symbols().
Used to catch keyboard interrupts in check_symbols() in order to properly terminate running worker processes.""" source_files = [] kconfig_files = [] defined_symbols = [] referenced_symbols = dict() # {file: [symbols]} for gitfile in get_files(): if REGEX_FILE_KCONFIG.match(gitfile): kconfig_files.append(gitfile) else: if ignore and re.match(ignore, gitfile): continue # add source files that do not match the ignore pattern source_files.append(gitfile) # parse source files arglist = partition(source_files, cpu_count()) for res in pool.map(parse_source_files, arglist): referenced_symbols.update(res) # parse kconfig files arglist = [] for part in partition(kconfig_files, cpu_count()): arglist.append((part, ignore)) for res in pool.map(parse_kconfig_files, arglist): defined_symbols.extend(res[0]) referenced_symbols.update(res[1]) defined_symbols = set(defined_symbols) # inverse mapping of referenced_symbols to dict(symbol: [files]) inv_map = dict() for _file, symbols in referenced_symbols.items(): for symbol in symbols: inv_map[symbol] = inv_map.get(symbol, set()) inv_map[symbol].add(_file) referenced_symbols = inv_map undefined = {} # {symbol: [files]} for symbol in sorted(referenced_symbols): # filter some false positives if symbol == "FOO" or symbol == "BAR" or \ symbol == "FOO_BAR" or symbol == "XXX": continue if symbol not in defined_symbols: if symbol.endswith("_MODULE"): # avoid false positives for kernel modules if symbol[:-len("_MODULE")] in defined_symbols: continue undefined[symbol] = referenced_symbols.get(symbol) return undefined, defined_symbols def parse_source_files(source_files): """Parse each source file in @source_files and return dictionary with source files as keys and lists of references Kconfig symbols as values.""" referenced_symbols = dict() for sfile in source_files: referenced_symbols[sfile] = parse_source_file(sfile) return referenced_symbols def parse_source_file(sfile): """Parse @sfile and return a list of referenced Kconfig symbols.""" lines = [] references = [] if not os.path.exists(sfile): return references with open(sfile, "r", encoding='utf-8', errors='replace') as stream: lines = stream.readlines() for line in lines: if "CONFIG_" not in line: continue symbols = REGEX_SOURCE_SYMBOL.findall(line) for symbol in symbols: if not REGEX_FILTER_SYMBOLS.search(symbol): continue references.append(symbol) return references def get_symbols_in_line(line): """Return mentioned Kconfig symbols in @line.""" return REGEX_SYMBOL.findall(line) def parse_kconfig_files(args): """Parse kconfig files and return tuple of defined and references Kconfig symbols. 
Note, @args is a tuple of a list of files and the @ignore pattern.""" kconfig_files = args[0] ignore = args[1] defined_symbols = [] referenced_symbols = dict() for kfile in kconfig_files: defined, references = parse_kconfig_file(kfile) defined_symbols.extend(defined) if ignore and re.match(ignore, kfile): # do not collect references for files that match the ignore pattern continue referenced_symbols[kfile] = references return (defined_symbols, referenced_symbols) def parse_kconfig_file(kfile): """Parse @kfile and update symbol definitions and references.""" lines = [] defined = [] references = [] if not os.path.exists(kfile): return defined, references with open(kfile, "r", encoding='utf-8', errors='replace') as stream: lines = stream.readlines() for i in range(len(lines)): line = lines[i] line = line.strip('\n') line = line.split("#")[0] # ignore comments if REGEX_KCONFIG_DEF.match(line): symbol_def = REGEX_KCONFIG_DEF.findall(line) defined.append(symbol_def[0]) elif REGEX_KCONFIG_STMT.match(line): line = REGEX_QUOTES.sub("", line) symbols = get_symbols_in_line(line) # multi-line statements while line.endswith("\\"): i += 1 line = lines[i] line = line.strip('\n') symbols.extend(get_symbols_in_line(line)) for symbol in set(symbols): if REGEX_NUMERIC.match(symbol): # ignore numeric values continue references.append(symbol) return defined, references def main(): try: print_undefined_symbols() except BrokenPipeError: # Python flushes standard streams on exit; redirect remaining output # to devnull to avoid another BrokenPipeError at shutdown devnull = os.open(os.devnull, os.O_WRONLY) os.dup2(devnull, sys.stdout.fileno()) sys.exit(1) # Python exits with error code 1 on EPIPE if __name__ == "__main__": main()
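# Illustration (not part of the script above): the core regexes at work on
# synthetic Kconfig lines; the symbol names are made up.
import re

SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}"
DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*"

print(re.match(DEF, "menuconfig FOO_EXAMPLE").group(1))          # FOO_EXAMPLE
print(re.findall(SYMBOL, "depends on FOO_EXAMPLE && BAR_DEMO"))  # ['FOO_EXAMPLE', 'BAR_DEMO']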
grace-kernel-grace-kernel-6.1.y
scripts/checkkconfigsymbols.py
#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 # Copyright Thomas Gleixner <[email protected]> from argparse import ArgumentParser from ply import lex, yacc import locale import traceback import fnmatch import sys import git import re import os class ParserException(Exception): def __init__(self, tok, txt): self.tok = tok self.txt = txt class SPDXException(Exception): def __init__(self, el, txt): self.el = el self.txt = txt class SPDXdata(object): def __init__(self): self.license_files = 0 self.exception_files = 0 self.licenses = [ ] self.exceptions = { } class dirinfo(object): def __init__(self): self.missing = 0 self.total = 0 self.files = [] def update(self, fname, basedir, miss): self.total += 1 self.missing += miss if miss: fname = './' + fname bdir = os.path.dirname(fname) if bdir == basedir.rstrip('/'): self.files.append(fname) # Read the spdx data from the LICENSES directory def read_spdxdata(repo): # The subdirectories of LICENSES in the kernel source # Note: exceptions needs to be parsed as last directory. license_dirs = [ "preferred", "dual", "deprecated", "exceptions" ] lictree = repo.head.commit.tree['LICENSES'] spdx = SPDXdata() for d in license_dirs: for el in lictree[d].traverse(): if not os.path.isfile(el.path): continue exception = None for l in open(el.path, encoding="utf-8").readlines(): if l.startswith('Valid-License-Identifier:'): lid = l.split(':')[1].strip().upper() if lid in spdx.licenses: raise SPDXException(el, 'Duplicate License Identifier: %s' %lid) else: spdx.licenses.append(lid) elif l.startswith('SPDX-Exception-Identifier:'): exception = l.split(':')[1].strip().upper() spdx.exceptions[exception] = [] elif l.startswith('SPDX-Licenses:'): for lic in l.split(':')[1].upper().strip().replace(' ', '').replace('\t', '').split(','): if not lic in spdx.licenses: raise SPDXException(None, 'Exception %s missing license %s' %(exception, lic)) spdx.exceptions[exception].append(lic) elif l.startswith("License-Text:"): if exception: if not len(spdx.exceptions[exception]): raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %exception) spdx.exception_files += 1 else: spdx.license_files += 1 break return spdx class id_parser(object): reserved = [ 'AND', 'OR', 'WITH' ] tokens = [ 'LPAR', 'RPAR', 'ID', 'EXC' ] + reserved precedence = ( ('nonassoc', 'AND', 'OR'), ) t_ignore = ' \t' def __init__(self, spdx): self.spdx = spdx self.lasttok = None self.lastid = None self.lexer = lex.lex(module = self, reflags = re.UNICODE) # Initialize the parser. No debug file and no parser rules stored on disk # The rules are small enough to be generated on the fly self.parser = yacc.yacc(module = self, write_tables = False, debug = False) self.lines_checked = 0 self.checked = 0 self.excluded = 0 self.spdx_valid = 0 self.spdx_errors = 0 self.spdx_dirs = {} self.dirdepth = -1 self.basedir = '.' 
self.curline = 0 self.deepest = 0 def set_dirinfo(self, basedir, dirdepth): if dirdepth >= 0: self.basedir = basedir bdir = basedir.lstrip('./').rstrip('/') if bdir != '': parts = bdir.split('/') else: parts = [] self.dirdepth = dirdepth + len(parts) # Validate License and Exception IDs def validate(self, tok): id = tok.value.upper() if tok.type == 'ID': if not id in self.spdx.licenses: raise ParserException(tok, 'Invalid License ID') self.lastid = id elif tok.type == 'EXC': if id not in self.spdx.exceptions: raise ParserException(tok, 'Invalid Exception ID') if self.lastid not in self.spdx.exceptions[id]: raise ParserException(tok, 'Exception not valid for license %s' %self.lastid) self.lastid = None elif tok.type != 'WITH': self.lastid = None # Lexer functions def t_RPAR(self, tok): r'\)' self.lasttok = tok.type return tok def t_LPAR(self, tok): r'\(' self.lasttok = tok.type return tok def t_ID(self, tok): r'[A-Za-z.0-9\-+]+' if self.lasttok == 'EXC': print(tok) raise ParserException(tok, 'Missing parentheses') tok.value = tok.value.strip() val = tok.value.upper() if val in self.reserved: tok.type = val elif self.lasttok == 'WITH': tok.type = 'EXC' self.lasttok = tok.type self.validate(tok) return tok def t_error(self, tok): raise ParserException(tok, 'Invalid token') def p_expr(self, p): '''expr : ID | ID WITH EXC | expr AND expr | expr OR expr | LPAR expr RPAR''' pass def p_error(self, p): if not p: raise ParserException(None, 'Unfinished license expression') else: raise ParserException(p, 'Syntax error') def parse(self, expr): self.lasttok = None self.lastid = None self.parser.parse(expr, lexer = self.lexer) def parse_lines(self, fd, maxlines, fname): self.checked += 1 self.curline = 0 fail = 1 try: for line in fd: line = line.decode(locale.getpreferredencoding(False), errors='ignore') self.curline += 1 if self.curline > maxlines: break self.lines_checked += 1 if line.find("SPDX-License-Identifier:") < 0: continue expr = line.split(':')[1].strip() # Remove trailing comment closure if line.strip().endswith('*/'): expr = expr.rstrip('*/').strip() # Remove trailing xml comment closure if line.strip().endswith('-->'): expr = expr.rstrip('-->').strip() # Special case for SH magic boot code files if line.startswith('LIST \"'): expr = expr.rstrip('\"').strip() self.parse(expr) self.spdx_valid += 1 # # Should we check for more SPDX ids in the same file and # complain if there are any? # fail = 0 break except ParserException as pe: if pe.tok: col = line.find(expr) + pe.tok.lexpos tok = pe.tok.value sys.stdout.write('%s: %d:%d %s: %s\n' %(fname, self.curline, col, pe.txt, tok)) else: sys.stdout.write('%s: %d:0 %s\n' %(fname, self.curline, pe.txt)) self.spdx_errors += 1 if fname == '-': return base = os.path.dirname(fname) if self.dirdepth > 0: parts = base.split('/') i = 0 base = '.' 
while i < self.dirdepth and i < len(parts) and len(parts[i]): base += '/' + parts[i] i += 1 elif self.dirdepth == 0: base = self.basedir else: base = './' + base.rstrip('/') base += '/' di = self.spdx_dirs.get(base, dirinfo()) di.update(fname, base, fail) self.spdx_dirs[base] = di class pattern(object): def __init__(self, line): self.pattern = line self.match = self.match_file if line == '.*': self.match = self.match_dot elif line.endswith('/'): self.pattern = line[:-1] self.match = self.match_dir elif line.startswith('/'): self.pattern = line[1:] self.match = self.match_fn def match_dot(self, fpath): return os.path.basename(fpath).startswith('.') def match_file(self, fpath): return os.path.basename(fpath) == self.pattern def match_fn(self, fpath): return fnmatch.fnmatchcase(fpath, self.pattern) def match_dir(self, fpath): if self.match_fn(os.path.dirname(fpath)): return True return fpath.startswith(self.pattern) def exclude_file(fpath): for rule in exclude_rules: if rule.match(fpath): return True return False def scan_git_tree(tree, basedir, dirdepth): parser.set_dirinfo(basedir, dirdepth) for el in tree.traverse(): if not os.path.isfile(el.path): continue if exclude_file(el.path): parser.excluded += 1 continue with open(el.path, 'rb') as fd: parser.parse_lines(fd, args.maxlines, el.path) def scan_git_subtree(tree, path, dirdepth): for p in path.strip('/').split('/'): tree = tree[p] scan_git_tree(tree, path.strip('/'), dirdepth) def read_exclude_file(fname): rules = [] if not fname: return rules with open(fname) as fd: for line in fd: line = line.strip() if line.startswith('#'): continue if not len(line): continue rules.append(pattern(line)) return rules if __name__ == '__main__': ap = ArgumentParser(description='SPDX expression checker') ap.add_argument('path', nargs='*', help='Check path or file. If not given full git tree scan. For stdin use "-"') ap.add_argument('-d', '--dirs', action='store_true', help='Show [sub]directory statistics.') ap.add_argument('-D', '--depth', type=int, default=-1, help='Directory depth for -d statistics. Default: unlimited') ap.add_argument('-e', '--exclude', help='File containing file patterns to exclude. Default: scripts/spdxexclude') ap.add_argument('-f', '--files', action='store_true', help='Show files without SPDX.') ap.add_argument('-m', '--maxlines', type=int, default=15, help='Maximum number of lines to scan in a file. 
Default 15') ap.add_argument('-v', '--verbose', action='store_true', help='Verbose statistics output') args = ap.parse_args() # Sanity check path arguments if '-' in args.path and len(args.path) > 1: sys.stderr.write('stdin input "-" must be the only path argument\n') sys.exit(1) try: # Use git to get the valid license expressions repo = git.Repo(os.getcwd()) assert not repo.bare # Initialize SPDX data spdx = read_spdxdata(repo) # Initialize the parser parser = id_parser(spdx) except SPDXException as se: if se.el: sys.stderr.write('%s: %s\n' %(se.el.path, se.txt)) else: sys.stderr.write('%s\n' %se.txt) sys.exit(1) except Exception as ex: sys.stderr.write('FAIL: %s\n' %ex) sys.stderr.write('%s\n' %traceback.format_exc()) sys.exit(1) try: fname = args.exclude if not fname: fname = os.path.join(os.path.dirname(__file__), 'spdxexclude') exclude_rules = read_exclude_file(fname) except Exception as ex: sys.stderr.write('FAIL: Reading exclude file %s: %s\n' %(fname, ex)) sys.exit(1) try: if len(args.path) and args.path[0] == '-': stdin = os.fdopen(sys.stdin.fileno(), 'rb') parser.parse_lines(stdin, args.maxlines, '-') else: if args.path: for p in args.path: if os.path.isfile(p): parser.parse_lines(open(p, 'rb'), args.maxlines, p) elif os.path.isdir(p): scan_git_subtree(repo.head.reference.commit.tree, p, args.depth) else: sys.stderr.write('path %s does not exist\n' %p) sys.exit(1) else: # Full git tree scan scan_git_tree(repo.head.commit.tree, '.', args.depth) ndirs = len(parser.spdx_dirs) dirsok = 0 if ndirs: for di in parser.spdx_dirs.values(): if not di.missing: dirsok += 1 if args.verbose: sys.stderr.write('\n') sys.stderr.write('License files: %12d\n' %spdx.license_files) sys.stderr.write('Exception files: %12d\n' %spdx.exception_files) sys.stderr.write('License IDs %12d\n' %len(spdx.licenses)) sys.stderr.write('Exception IDs %12d\n' %len(spdx.exceptions)) sys.stderr.write('\n') sys.stderr.write('Files excluded: %12d\n' %parser.excluded) sys.stderr.write('Files checked: %12d\n' %parser.checked) sys.stderr.write('Lines checked: %12d\n' %parser.lines_checked) if parser.checked: pc = int(100 * parser.spdx_valid / parser.checked) sys.stderr.write('Files with SPDX: %12d %3d%%\n' %(parser.spdx_valid, pc)) sys.stderr.write('Files with errors: %12d\n' %parser.spdx_errors) if ndirs: sys.stderr.write('\n') sys.stderr.write('Directories accounted: %8d\n' %ndirs) pc = int(100 * dirsok / ndirs) sys.stderr.write('Directories complete: %8d %3d%%\n' %(dirsok, pc)) if ndirs and ndirs != dirsok and args.dirs: if args.verbose: sys.stderr.write('\n') sys.stderr.write('Incomplete directories: SPDX in Files\n') for f in sorted(parser.spdx_dirs.keys()): di = parser.spdx_dirs[f] if di.missing: valid = di.total - di.missing pc = int(100 * valid / di.total) sys.stderr.write(' %-80s: %5d of %5d %3d%%\n' %(f, valid, di.total, pc)) if ndirs and ndirs != dirsok and args.files: if args.verbose or args.dirs: sys.stderr.write('\n') sys.stderr.write('Files without SPDX:\n') for f in sorted(parser.spdx_dirs.keys()): di = parser.spdx_dirs[f] for f in sorted(di.files): sys.stderr.write(' %s\n' %f) sys.exit(0) except Exception as ex: sys.stderr.write('FAIL: %s\n' %ex) sys.stderr.write('%s\n' %traceback.format_exc()) sys.exit(1)
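# Illustration (not part of spdxcheck.py): the exclude-rule semantics of the
# 'pattern' class above, reimplemented as a stand-alone function; the rules
# and paths below are examples only.
import fnmatch
import os

def rule_matches(rule, fpath):
    if rule == '.*':                         # dotfile rule
        return os.path.basename(fpath).startswith('.')
    if rule.endswith('/'):                   # directory rule
        pat = rule[:-1]
        return fnmatch.fnmatchcase(os.path.dirname(fpath), pat) or \
            fpath.startswith(pat)
    if rule.startswith('/'):                 # anchored glob rule
        return fnmatch.fnmatchcase(fpath, rule[1:])
    return os.path.basename(fpath) == rule   # plain file-name rule

print(rule_matches('Documentation/', 'Documentation/example.rst'))  # True
print(rule_matches('/COPYING*', 'COPYING.GPL'))                     # True
print(rule_matches('.*', 'scripts/.gitignore'))                     # True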
grace-kernel-grace-kernel-6.1.y
scripts/spdxcheck.py
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only

"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>

This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the function names and the call
times are provided.

Usage:
    Be sure that you have CONFIG_FUNCTION_TRACER
    # mount -t debugfs nodev /sys/kernel/debug
    # echo function > /sys/kernel/debug/tracing/current_tracer
    $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
    Wait a while, but not too long: the script is a bit slow.
    Break the pipe (Ctrl + Z)
    $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace
    Then you have your drawn trace in draw_functrace
"""

import sys, re


class CallTree:
    """This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    ROOT = None

    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """Retrieve the last parent of the current node that has the name
        given by func. If this function is not a parent, then create it as
        a new child of root.
        @return: A reference to the parent.
        """
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)

        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch + " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch + " |", True)
            i += 1
        return s


class BrokenLineException(Exception):
    """If the last line is not complete because of the pipe breakage,
    we want to stop the processing and ignore this line.
    """
    pass


class CommentLineException(Exception):
    """If the line is a comment (as in the beginning of the trace file),
    just ignore it.
    """
    pass


def parseLine(line):
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(2), m.group(3), m.group(4))


def main():
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            break
        except CommentLineException:
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    print(CallTree.ROOT)


if __name__ == "__main__":
    main()
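# Illustration (not part of the script above): parseLine()'s regex against a
# synthetic ftrace 'function' tracer line.
import re

sample = "          bash-1234  [000] ....   123.456789: do_sys_open <-sys_openat"
m = re.match(r"[^]]+?\] +([a-z.]+) +([0-9.]+): (\w+) <-(\w+)", sample)
print(m.group(2), m.group(3), m.group(4))  # 123.456789 do_sys_open sys_openat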
grace-kernel-grace-kernel-6.1.y
scripts/tracing/draw_functrace.py
#
# gdb helper commands and functions for Linux kernel debugging
#
#  loader module
#
# Copyright (c) Siemens AG, 2012, 2013
#
# Authors:
#  Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#

import os
import sys  # needed explicitly for the sys.path manipulation below

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/scripts/gdb")

try:
    gdb.parse_and_eval("0")
    gdb.execute("", to_string=True)
except:
    gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
              "work.\n")
else:
    import linux.constants
    if linux.constants.LX_CONFIG_DEBUG_INFO_REDUCED:
        raise gdb.GdbError("Reduced debug information will prevent GDB "
                           "from having complete types.\n")
    import linux.utils
    import linux.symbols
    import linux.modules
    import linux.dmesg
    import linux.tasks
    import linux.config
    import linux.cpus
    import linux.lists
    import linux.rbtree
    import linux.proc
    import linux.timerlist
    import linux.clk
    import linux.genpd
    import linux.device
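# Usage sketch (assumes a kernel built with CONFIG_GDB_SCRIPTS; paths are
# examples only):
#
#   $ gdb ./vmlinux
#   (gdb) add-auto-load-safe-path /path/to/linux-build
#   (gdb) source vmlinux-gdb.py
#   (gdb) apropos lx
#
# 'apropos lx' then lists the lx-* commands registered by the modules above.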
grace-kernel-grace-kernel-6.1.y
scripts/gdb/vmlinux-gdb.py
# # gdb helper commands and functions for Linux kernel debugging # # list tools # # Copyright (c) Thiebaud Weksteen, 2015 # # Authors: # Thiebaud Weksteen <[email protected]> # # This work is licensed under the terms of the GNU GPL version 2. # import gdb from linux import utils list_head = utils.CachedType("struct list_head") hlist_head = utils.CachedType("struct hlist_head") hlist_node = utils.CachedType("struct hlist_node") def list_for_each(head): if head.type == list_head.get_type().pointer(): head = head.dereference() elif head.type != list_head.get_type(): raise TypeError("Must be struct list_head not {}" .format(head.type)) if head['next'] == 0: gdb.write("list_for_each: Uninitialized list '{}' treated as empty\n" .format(head.address)) return node = head['next'].dereference() while node.address != head.address: yield node.address node = node['next'].dereference() def list_for_each_entry(head, gdbtype, member): for node in list_for_each(head): yield utils.container_of(node, gdbtype, member) def hlist_for_each(head): if head.type == hlist_head.get_type().pointer(): head = head.dereference() elif head.type != hlist_head.get_type(): raise TypeError("Must be struct hlist_head not {}" .format(head.type)) node = head['first'].dereference() while node.address: yield node.address node = node['next'].dereference() def hlist_for_each_entry(head, gdbtype, member): for node in hlist_for_each(head): yield utils.container_of(node, gdbtype, member) def list_check(head): nb = 0 if (head.type == list_head.get_type().pointer()): head = head.dereference() elif (head.type != list_head.get_type()): raise gdb.GdbError('argument must be of type (struct list_head [*])') c = head try: gdb.write("Starting with: {}\n".format(c)) except gdb.MemoryError: gdb.write('head is not accessible\n') return while True: p = c['prev'].dereference() n = c['next'].dereference() try: if p['next'] != c.address: gdb.write('prev.next != current: ' 'current@{current_addr}={current} ' 'prev@{p_addr}={p}\n'.format( current_addr=c.address, current=c, p_addr=p.address, p=p, )) return except gdb.MemoryError: gdb.write('prev is not accessible: ' 'current@{current_addr}={current}\n'.format( current_addr=c.address, current=c )) return try: if n['prev'] != c.address: gdb.write('next.prev != current: ' 'current@{current_addr}={current} ' 'next@{n_addr}={n}\n'.format( current_addr=c.address, current=c, n_addr=n.address, n=n, )) return except gdb.MemoryError: gdb.write('next is not accessible: ' 'current@{current_addr}={current}\n'.format( current_addr=c.address, current=c )) return c = n nb += 1 if c == head: gdb.write("list is consistent: {} node(s)\n".format(nb)) return class LxListChk(gdb.Command): """Verify a list consistency""" def __init__(self): super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA, gdb.COMPLETE_EXPRESSION) def invoke(self, arg, from_tty): argv = gdb.string_to_argv(arg) if len(argv) != 1: raise gdb.GdbError("lx-list-check takes one argument") list_check(gdb.parse_and_eval(argv[0])) LxListChk()
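# Illustration (not part of lists.py): the container_of() arithmetic that
# utils.container_of performs inside gdb, modelled in plain Python with
# ctypes; the structure layout below is made up.
import ctypes

class ListHead(ctypes.Structure):
    pass

ListHead._fields_ = [("next", ctypes.POINTER(ListHead)),
                     ("prev", ctypes.POINTER(ListHead))]

class Item(ctypes.Structure):
    _fields_ = [("value", ctypes.c_int), ("node", ListHead)]

obj = Item(value=42)
node_addr = ctypes.addressof(obj.node)
# container_of(node, struct Item, node): subtract the member offset
recovered = ctypes.cast(node_addr - Item.node.offset, ctypes.POINTER(Item))
print(recovered.contents.value)  # 42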
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/lists.py
# # gdb helper commands and functions for Linux kernel debugging # # task & thread tools # # Copyright (c) Siemens AG, 2011-2013 # # Authors: # Jan Kiszka <[email protected]> # # This work is licensed under the terms of the GNU GPL version 2. # import gdb from linux import utils task_type = utils.CachedType("struct task_struct") def task_lists(): task_ptr_type = task_type.get_type().pointer() init_task = gdb.parse_and_eval("init_task").address t = g = init_task while True: while True: yield t t = utils.container_of(t['thread_group']['next'], task_ptr_type, "thread_group") if t == g: break t = g = utils.container_of(g['tasks']['next'], task_ptr_type, "tasks") if t == init_task: return def get_task_by_pid(pid): for task in task_lists(): if int(task['pid']) == pid: return task return None class LxTaskByPidFunc(gdb.Function): """Find Linux task by PID and return the task_struct variable. $lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and return that task_struct variable which PID matches.""" def __init__(self): super(LxTaskByPidFunc, self).__init__("lx_task_by_pid") def invoke(self, pid): task = get_task_by_pid(pid) if task: return task.dereference() else: raise gdb.GdbError("No task of PID " + str(pid)) LxTaskByPidFunc() class LxPs(gdb.Command): """Dump Linux tasks.""" def __init__(self): super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): gdb.write("{:>10} {:>12} {:>7}\n".format("TASK", "PID", "COMM")) for task in task_lists(): gdb.write("{} {:^5} {}\n".format( task.format_string().split()[0], task["pid"].format_string(), task["comm"].string())) LxPs() thread_info_type = utils.CachedType("struct thread_info") ia64_task_size = None def get_thread_info(task): thread_info_ptr_type = thread_info_type.get_type().pointer() if utils.is_target_arch("ia64"): global ia64_task_size if ia64_task_size is None: ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)") thread_info_addr = task.address + ia64_task_size thread_info = thread_info_addr.cast(thread_info_ptr_type) else: if task.type.fields()[0].type == thread_info_type.get_type(): return task['thread_info'] thread_info = task['stack'].cast(thread_info_ptr_type) return thread_info.dereference() class LxThreadInfoFunc (gdb.Function): """Calculate Linux thread_info from task variable. $lx_thread_info(TASK): Given TASK, return the corresponding thread_info variable.""" def __init__(self): super(LxThreadInfoFunc, self).__init__("lx_thread_info") def invoke(self, task): return get_thread_info(task) LxThreadInfoFunc() class LxThreadInfoByPidFunc (gdb.Function): """Calculate Linux thread_info from task variable found by pid $lx_thread_info_by_pid(PID): Given PID, return the corresponding thread_info variable.""" def __init__(self): super(LxThreadInfoByPidFunc, self).__init__("lx_thread_info_by_pid") def invoke(self, pid): task = get_task_by_pid(pid) if task: return get_thread_info(task.dereference()) else: raise gdb.GdbError("No task of PID " + str(pid)) LxThreadInfoByPidFunc()
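# In-gdb usage sketch (requires a live target with kernel symbols; PID 1 is
# just an example):
#
#   (gdb) lx-ps
#   (gdb) p $lx_task_by_pid(1).comm
#   (gdb) p $lx_thread_info_by_pid(1)
#
# $lx_task_by_pid() returns the dereferenced task_struct, so members are
# accessed with '.' rather than '->'.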
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/tasks.py
# SPDX-License-Identifier: GPL-2.0
#
# Copyright 2019 Google LLC.

import gdb
import zlib

from linux import utils


class LxConfigDump(gdb.Command):
    """Output kernel config to the filename specified as the command
    argument. Equivalent to 'zcat /proc/config.gz > config.txt' on
    a running target"""

    def __init__(self):
        super(LxConfigDump, self).__init__("lx-configdump", gdb.COMMAND_DATA,
                                           gdb.COMPLETE_FILENAME)

    def invoke(self, arg, from_tty):
        if len(arg) == 0:
            filename = "config.txt"
        else:
            filename = arg

        try:
            py_config_ptr = gdb.parse_and_eval("&kernel_config_data")
            py_config_ptr_end = gdb.parse_and_eval("&kernel_config_data_end")
            py_config_size = py_config_ptr_end - py_config_ptr
        except gdb.error:
            raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?")

        inf = gdb.inferiors()[0]
        zconfig_buf = utils.read_memoryview(inf, py_config_ptr,
                                            py_config_size).tobytes()

        config_buf = zlib.decompress(zconfig_buf, 16)
        with open(filename, 'wb') as f:
            f.write(config_buf)

        gdb.write("Dumped config to " + filename + "\n")


LxConfigDump()
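# Illustration (not part of config.py): the same gzip-in-zlib decode on a
# self-contained blob. wbits=16, as used above, selects gzip framing with the
# window size taken from the stream header; the documented equivalent is
# 16 + zlib.MAX_WBITS.
import gzip
import zlib

blob = gzip.compress(b"CONFIG_EXAMPLE=y\n")
print(zlib.decompress(blob, 16 + zlib.MAX_WBITS).decode(), end="")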
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/config.py
# # gdb helper commands and functions for Linux kernel debugging # # kernel log buffer dump # # Copyright (c) Siemens AG, 2011, 2012 # # Authors: # Jan Kiszka <[email protected]> # # This work is licensed under the terms of the GNU GPL version 2. # import gdb import sys from linux import utils printk_info_type = utils.CachedType("struct printk_info") prb_data_blk_lpos_type = utils.CachedType("struct prb_data_blk_lpos") prb_desc_type = utils.CachedType("struct prb_desc") prb_desc_ring_type = utils.CachedType("struct prb_desc_ring") prb_data_ring_type = utils.CachedType("struct prb_data_ring") printk_ringbuffer_type = utils.CachedType("struct printk_ringbuffer") class LxDmesg(gdb.Command): """Print Linux kernel log buffer.""" def __init__(self): super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): inf = gdb.inferiors()[0] # read in prb structure prb_addr = int(str(gdb.parse_and_eval("(void *)'printk.c'::prb")).split()[0], 16) sz = printk_ringbuffer_type.get_type().sizeof prb = utils.read_memoryview(inf, prb_addr, sz).tobytes() # read in descriptor ring structure off = printk_ringbuffer_type.get_type()['desc_ring'].bitpos // 8 addr = prb_addr + off sz = prb_desc_ring_type.get_type().sizeof desc_ring = utils.read_memoryview(inf, addr, sz).tobytes() # read in descriptor count, size, and address off = prb_desc_ring_type.get_type()['count_bits'].bitpos // 8 desc_ring_count = 1 << utils.read_u32(desc_ring, off) desc_sz = prb_desc_type.get_type().sizeof off = prb_desc_ring_type.get_type()['descs'].bitpos // 8 desc_addr = utils.read_ulong(desc_ring, off) # read in info size and address info_sz = printk_info_type.get_type().sizeof off = prb_desc_ring_type.get_type()['infos'].bitpos // 8 info_addr = utils.read_ulong(desc_ring, off) # read in text data ring structure off = printk_ringbuffer_type.get_type()['text_data_ring'].bitpos // 8 addr = prb_addr + off sz = prb_data_ring_type.get_type().sizeof text_data_ring = utils.read_memoryview(inf, addr, sz).tobytes() # read in text data size and address off = prb_data_ring_type.get_type()['size_bits'].bitpos // 8 text_data_sz = 1 << utils.read_u32(text_data_ring, off) off = prb_data_ring_type.get_type()['data'].bitpos // 8 text_data_addr = utils.read_ulong(text_data_ring, off) sv_off = prb_desc_type.get_type()['state_var'].bitpos // 8 off = prb_desc_type.get_type()['text_blk_lpos'].bitpos // 8 begin_off = off + (prb_data_blk_lpos_type.get_type()['begin'].bitpos // 8) next_off = off + (prb_data_blk_lpos_type.get_type()['next'].bitpos // 8) ts_off = printk_info_type.get_type()['ts_nsec'].bitpos // 8 len_off = printk_info_type.get_type()['text_len'].bitpos // 8 # definitions from kernel/printk/printk_ringbuffer.h desc_committed = 1 desc_finalized = 2 desc_sv_bits = utils.get_long_type().sizeof * 8 desc_flags_shift = desc_sv_bits - 2 desc_flags_mask = 3 << desc_flags_shift desc_id_mask = ~desc_flags_mask # read in tail and head descriptor ids off = prb_desc_ring_type.get_type()['tail_id'].bitpos // 8 tail_id = utils.read_atomic_long(desc_ring, off) off = prb_desc_ring_type.get_type()['head_id'].bitpos // 8 head_id = utils.read_atomic_long(desc_ring, off) did = tail_id while True: ind = did % desc_ring_count desc_off = desc_sz * ind info_off = info_sz * ind desc = utils.read_memoryview(inf, desc_addr + desc_off, desc_sz).tobytes() # skip non-committed record state = 3 & (utils.read_atomic_long(desc, sv_off) >> desc_flags_shift) if state != desc_committed and state != desc_finalized: if did == head_id: break did = (did + 1) 
& desc_id_mask continue begin = utils.read_ulong(desc, begin_off) % text_data_sz end = utils.read_ulong(desc, next_off) % text_data_sz info = utils.read_memoryview(inf, info_addr + info_off, info_sz).tobytes() # handle data-less record if begin & 1 == 1: text = "" else: # handle wrapping data block if begin > end: begin = 0 # skip over descriptor id text_start = begin + utils.get_long_type().sizeof text_len = utils.read_u16(info, len_off) # handle truncated message if end - text_start < text_len: text_len = end - text_start text_data = utils.read_memoryview(inf, text_data_addr + text_start, text_len).tobytes() text = text_data[0:text_len].decode(encoding='utf8', errors='replace') time_stamp = utils.read_u64(info, ts_off) for line in text.splitlines(): msg = u"[{time:12.6f}] {line}\n".format( time=time_stamp / 1000000000.0, line=line) # With python2 gdb.write will attempt to convert unicode to # ascii and might fail so pass an utf8-encoded str instead. if sys.hexversion < 0x03000000: msg = msg.encode(encoding='utf8', errors='replace') gdb.write(msg) if did == head_id: break did = (did + 1) & desc_id_mask LxDmesg()
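# Illustration (not part of dmesg.py): the state/id unpacking used above.
# The top two bits of a descriptor's state_var hold the record state, the
# remaining bits the descriptor id (values are synthetic, 64-bit long
# assumed).
desc_sv_bits = 8 * 8
desc_flags_shift = desc_sv_bits - 2
desc_id_mask = ~(3 << desc_flags_shift)

desc_finalized = 2
state_var = (desc_finalized << desc_flags_shift) | 12345

print(3 & (state_var >> desc_flags_shift))  # 2 -> finalized
print(state_var & desc_id_mask)             # 12345 -> descriptor id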
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/dmesg.py
# # gdb helper commands and functions for Linux kernel debugging # # Kernel proc information reader # # Copyright (c) 2016 Linaro Ltd # # Authors: # Kieran Bingham <[email protected]> # # This work is licensed under the terms of the GNU GPL version 2. # import gdb from linux import constants from linux import utils from linux import tasks from linux import lists from struct import * class LxCmdLine(gdb.Command): """ Report the Linux Commandline used in the current kernel. Equivalent to cat /proc/cmdline on a running target""" def __init__(self): super(LxCmdLine, self).__init__("lx-cmdline", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): gdb.write(gdb.parse_and_eval("saved_command_line").string() + "\n") LxCmdLine() class LxVersion(gdb.Command): """ Report the Linux Version of the current kernel. Equivalent to cat /proc/version on a running target""" def __init__(self): super(LxVersion, self).__init__("lx-version", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): # linux_banner should contain a newline gdb.write(gdb.parse_and_eval("(char *)linux_banner").string()) LxVersion() # Resource Structure Printers # /proc/iomem # /proc/ioports def get_resources(resource, depth): while resource: yield resource, depth child = resource['child'] if child: for res, deep in get_resources(child, depth + 1): yield res, deep resource = resource['sibling'] def show_lx_resources(resource_str): resource = gdb.parse_and_eval(resource_str) width = 4 if resource['end'] < 0x10000 else 8 # Iterate straight to the first child for res, depth in get_resources(resource['child'], 0): start = int(res['start']) end = int(res['end']) gdb.write(" " * depth * 2 + "{0:0{1}x}-".format(start, width) + "{0:0{1}x} : ".format(end, width) + res['name'].string() + "\n") class LxIOMem(gdb.Command): """Identify the IO memory resource locations defined by the kernel Equivalent to cat /proc/iomem on a running target""" def __init__(self): super(LxIOMem, self).__init__("lx-iomem", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): return show_lx_resources("iomem_resource") LxIOMem() class LxIOPorts(gdb.Command): """Identify the IO port resource locations defined by the kernel Equivalent to cat /proc/ioports on a running target""" def __init__(self): super(LxIOPorts, self).__init__("lx-ioports", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): return show_lx_resources("ioport_resource") LxIOPorts() # Mount namespace viewer # /proc/mounts def info_opts(lst, opt): opts = "" for key, string in lst.items(): if opt & key: opts += string return opts FS_INFO = {constants.LX_SB_SYNCHRONOUS: ",sync", constants.LX_SB_MANDLOCK: ",mand", constants.LX_SB_DIRSYNC: ",dirsync", constants.LX_SB_NOATIME: ",noatime", constants.LX_SB_NODIRATIME: ",nodiratime"} MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid", constants.LX_MNT_NODEV: ",nodev", constants.LX_MNT_NOEXEC: ",noexec", constants.LX_MNT_NOATIME: ",noatime", constants.LX_MNT_NODIRATIME: ",nodiratime", constants.LX_MNT_RELATIME: ",relatime"} mount_type = utils.CachedType("struct mount") mount_ptr_type = mount_type.get_type().pointer() class LxMounts(gdb.Command): """Report the VFS mounts of the current process namespace. 
Equivalent to cat /proc/mounts on a running target An integer value can be supplied to display the mount values of that process namespace""" def __init__(self): super(LxMounts, self).__init__("lx-mounts", gdb.COMMAND_DATA) # Equivalent to proc_namespace.c:show_vfsmnt # However, that has the ability to call into s_op functions # whereas we cannot and must make do with the information we can obtain. def invoke(self, arg, from_tty): argv = gdb.string_to_argv(arg) if len(argv) >= 1: try: pid = int(argv[0]) except gdb.error: raise gdb.GdbError("Provide a PID as integer value") else: pid = 1 task = tasks.get_task_by_pid(pid) if not task: raise gdb.GdbError("Couldn't find a process with PID {}" .format(pid)) namespace = task['nsproxy']['mnt_ns'] if not namespace: raise gdb.GdbError("No namespace for current process") gdb.write("{:^18} {:^15} {:>9} {} {} options\n".format( "mount", "super_block", "devname", "pathname", "fstype")) for vfs in lists.list_for_each_entry(namespace['list'], mount_ptr_type, "mnt_list"): devname = vfs['mnt_devname'].string() devname = devname if devname else "none" pathname = "" parent = vfs while True: mntpoint = parent['mnt_mountpoint'] pathname = utils.dentry_name(mntpoint) + pathname if (parent == parent['mnt_parent']): break parent = parent['mnt_parent'] if (pathname == ""): pathname = "/" superblock = vfs['mnt']['mnt_sb'] fstype = superblock['s_type']['name'].string() s_flags = int(superblock['s_flags']) m_flags = int(vfs['mnt']['mnt_flags']) rd = "ro" if (s_flags & constants.LX_SB_RDONLY) else "rw" gdb.write("{} {} {} {} {} {}{}{} 0 0\n".format( vfs.format_string(), superblock.format_string(), devname, pathname, fstype, rd, info_opts(FS_INFO, s_flags), info_opts(MNT_INFO, m_flags))) LxMounts() class LxFdtDump(gdb.Command): """Output Flattened Device Tree header and dump FDT blob to the filename specified as the command argument. 
Equivalent to 'cat /proc/fdt > fdtdump.dtb' on a running target""" def __init__(self): super(LxFdtDump, self).__init__("lx-fdtdump", gdb.COMMAND_DATA, gdb.COMPLETE_FILENAME) def fdthdr_to_cpu(self, fdt_header): fdt_header_be = ">IIIIIII" fdt_header_le = "<IIIIIII" if utils.get_target_endianness() == 1: output_fmt = fdt_header_le else: output_fmt = fdt_header_be return unpack(output_fmt, pack(fdt_header_be, fdt_header['magic'], fdt_header['totalsize'], fdt_header['off_dt_struct'], fdt_header['off_dt_strings'], fdt_header['off_mem_rsvmap'], fdt_header['version'], fdt_header['last_comp_version'])) def invoke(self, arg, from_tty): if not constants.LX_CONFIG_OF: raise gdb.GdbError("Kernel not compiled with CONFIG_OF\n") if len(arg) == 0: filename = "fdtdump.dtb" else: filename = arg py_fdt_header_ptr = gdb.parse_and_eval( "(const struct fdt_header *) initial_boot_params") py_fdt_header = py_fdt_header_ptr.dereference() fdt_header = self.fdthdr_to_cpu(py_fdt_header) if fdt_header[0] != constants.LX_OF_DT_HEADER: raise gdb.GdbError("No flattened device tree magic found\n") gdb.write("fdt_magic: 0x{:02X}\n".format(fdt_header[0])) gdb.write("fdt_totalsize: 0x{:02X}\n".format(fdt_header[1])) gdb.write("off_dt_struct: 0x{:02X}\n".format(fdt_header[2])) gdb.write("off_dt_strings: 0x{:02X}\n".format(fdt_header[3])) gdb.write("off_mem_rsvmap: 0x{:02X}\n".format(fdt_header[4])) gdb.write("version: {}\n".format(fdt_header[5])) gdb.write("last_comp_version: {}\n".format(fdt_header[6])) inf = gdb.inferiors()[0] fdt_buf = utils.read_memoryview(inf, py_fdt_header_ptr, fdt_header[1]).tobytes() try: f = open(filename, 'wb') except gdb.error: raise gdb.GdbError("Could not open file to dump fdt") f.write(fdt_buf) f.close() gdb.write("Dumped fdt blob to " + filename + "\n") LxFdtDump()
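# Illustration (not part of proc.py): the endianness handling behind
# fdthdr_to_cpu(). FDT headers are big-endian on the wire; reading the magic
# with the wrong byte order scrambles it (header fields are synthetic).
from struct import pack, unpack

OF_DT_HEADER = 0xd00dfeed
hdr = pack(">IIIIIII", OF_DT_HEADER, 0x100, 0x38, 0xd8, 0x28, 17, 16)
print(hex(unpack(">I", hdr[:4])[0]))  # 0xd00dfeed
print(hex(unpack("<I", hdr[:4])[0]))  # 0xedfe0dd0 (byte-swapped)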
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/proc.py
# SPDX-License-Identifier: GPL-2.0 # # Copyright 2019 Google LLC. import gdb from linux import utils rb_root_type = utils.CachedType("struct rb_root") rb_node_type = utils.CachedType("struct rb_node") def rb_first(root): if root.type == rb_root_type.get_type(): node = root.address.cast(rb_root_type.get_type().pointer()) elif root.type != rb_root_type.get_type().pointer(): raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) node = root['rb_node'] if node == 0: return None while node['rb_left']: node = node['rb_left'] return node def rb_last(root): if root.type == rb_root_type.get_type(): node = root.address.cast(rb_root_type.get_type().pointer()) elif root.type != rb_root_type.get_type().pointer(): raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) node = root['rb_node'] if node == 0: return None while node['rb_right']: node = node['rb_right'] return node def rb_parent(node): parent = gdb.Value(node['__rb_parent_color'] & ~3) return parent.cast(rb_node_type.get_type().pointer()) def rb_empty_node(node): return node['__rb_parent_color'] == node.address def rb_next(node): if node.type == rb_node_type.get_type(): node = node.address.cast(rb_node_type.get_type().pointer()) elif node.type != rb_node_type.get_type().pointer(): raise gdb.GdbError("Must be struct rb_node not {}".format(node.type)) if rb_empty_node(node): return None if node['rb_right']: node = node['rb_right'] while node['rb_left']: node = node['rb_left'] return node parent = rb_parent(node) while parent and node == parent['rb_right']: node = parent parent = rb_parent(node) return parent def rb_prev(node): if node.type == rb_node_type.get_type(): node = node.address.cast(rb_node_type.get_type().pointer()) elif node.type != rb_node_type.get_type().pointer(): raise gdb.GdbError("Must be struct rb_node not {}".format(node.type)) if rb_empty_node(node): return None if node['rb_left']: node = node['rb_left'] while node['rb_right']: node = node['rb_right'] return node.dereference() parent = rb_parent(node) while parent and node == parent['rb_left'].dereference(): node = parent parent = rb_parent(node) return parent class LxRbFirst(gdb.Function): """Lookup and return a node from an RBTree $lx_rb_first(root): Return the node at the given index. If index is omitted, the root node is dereferenced and returned.""" def __init__(self): super(LxRbFirst, self).__init__("lx_rb_first") def invoke(self, root): result = rb_first(root) if result is None: raise gdb.GdbError("No entry in tree") return result LxRbFirst() class LxRbLast(gdb.Function): """Lookup and return a node from an RBTree. $lx_rb_last(root): Return the node at the given index. If index is omitted, the root node is dereferenced and returned.""" def __init__(self): super(LxRbLast, self).__init__("lx_rb_last") def invoke(self, root): result = rb_last(root) if result is None: raise gdb.GdbError("No entry in tree") return result LxRbLast() class LxRbNext(gdb.Function): """Lookup and return a node from an RBTree. $lx_rb_next(node): Return the node at the given index. If index is omitted, the root node is dereferenced and returned.""" def __init__(self): super(LxRbNext, self).__init__("lx_rb_next") def invoke(self, node): result = rb_next(node) if result is None: raise gdb.GdbError("No entry in tree") return result LxRbNext() class LxRbPrev(gdb.Function): """Lookup and return a node from an RBTree. $lx_rb_prev(node): Return the node at the given index. 
If index is omitted, the root node is dereferenced and returned.""" def __init__(self): super(LxRbPrev, self).__init__("lx_rb_prev") def invoke(self, node): result = rb_prev(node) if result is None: raise gdb.GdbError("No entry in tree") return result LxRbPrev()
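# Illustration (not part of rbtree.py): the parent-pointer encoding that
# rb_parent() decodes. The node's color lives in the low bits of
# __rb_parent_color because node addresses are at least 4-byte aligned
# (the address below is synthetic).
RB_RED, RB_BLACK = 0, 1

parent_addr = 0xffff888012345680
parent_color = parent_addr | RB_BLACK

print(hex(parent_color & ~3))  # 0xffff888012345680 -> parent pointer
print(parent_color & 1)        # 1 -> RB_BLACK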
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/rbtree.py
# SPDX-License-Identifier: GPL-2.0 # # Copyright (c) NXP 2019 import gdb from linux.utils import CachedType from linux.utils import container_of from linux.lists import list_for_each_entry device_private_type = CachedType('struct device_private') device_type = CachedType('struct device') subsys_private_type = CachedType('struct subsys_private') kobject_type = CachedType('struct kobject') kset_type = CachedType('struct kset') bus_type = CachedType('struct bus_type') class_type = CachedType('struct class') def dev_name(dev): dev_init_name = dev['init_name'] if dev_init_name: return dev_init_name.string() return dev['kobj']['name'].string() def kset_for_each_object(kset): return list_for_each_entry(kset['list'], kobject_type.get_type().pointer(), "entry") def for_each_bus(): for kobj in kset_for_each_object(gdb.parse_and_eval('bus_kset')): subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj') subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys') yield subsys_priv['bus'] def for_each_class(): for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')): subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj') subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys') yield subsys_priv['class'] def get_bus_by_name(name): for item in for_each_bus(): if item['name'].string() == name: return item raise gdb.GdbError("Can't find bus type {!r}".format(name)) def get_class_by_name(name): for item in for_each_class(): if item['name'].string() == name: return item raise gdb.GdbError("Can't find device class {!r}".format(name)) klist_type = CachedType('struct klist') klist_node_type = CachedType('struct klist_node') def klist_for_each(klist): return list_for_each_entry(klist['k_list'], klist_node_type.get_type().pointer(), 'n_node') def bus_for_each_device(bus): for kn in klist_for_each(bus['p']['klist_devices']): dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_bus') yield dp['device'] def class_for_each_device(cls): for kn in klist_for_each(cls['p']['klist_devices']): dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_class') yield dp['device'] def device_for_each_child(dev): for kn in klist_for_each(dev['p']['klist_children']): dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_parent') yield dp['device'] def _show_device(dev, level=0, recursive=False): gdb.write('{}dev {}:\t{}\n'.format('\t' * level, dev_name(dev), dev)) if recursive: for child in device_for_each_child(dev): _show_device(child, level + 1, recursive) class LxDeviceListBus(gdb.Command): '''Print devices on a bus (or all buses if not specified)''' def __init__(self): super(LxDeviceListBus, self).__init__('lx-device-list-bus', gdb.COMMAND_DATA) def invoke(self, arg, from_tty): if not arg: for bus in for_each_bus(): gdb.write('bus {}:\t{}\n'.format(bus['name'].string(), bus)) for dev in bus_for_each_device(bus): _show_device(dev, level=1) else: bus = get_bus_by_name(arg) if not bus: raise gdb.GdbError("Can't find bus {!r}".format(arg)) for dev in bus_for_each_device(bus): _show_device(dev) class LxDeviceListClass(gdb.Command): '''Print devices in a class (or all classes if not specified)''' def __init__(self): super(LxDeviceListClass, self).__init__('lx-device-list-class', gdb.COMMAND_DATA) def invoke(self, arg, from_tty): if not arg: for cls in for_each_class(): gdb.write("class {}:\t{}\n".format(cls['name'].string(), cls)) for dev in class_for_each_device(cls): _show_device(dev, 
level=1) else: cls = get_class_by_name(arg) for dev in class_for_each_device(cls): _show_device(dev) class LxDeviceListTree(gdb.Command): '''Print a device and its children recursively''' def __init__(self): super(LxDeviceListTree, self).__init__('lx-device-list-tree', gdb.COMMAND_DATA) def invoke(self, arg, from_tty): if not arg: raise gdb.GdbError('Please provide pointer to struct device') dev = gdb.parse_and_eval(arg) if dev.type != device_type.get_type().pointer(): raise gdb.GdbError('Please provide pointer to struct device') _show_device(dev, level=0, recursive=True) class LxDeviceFindByBusName(gdb.Function): '''Find struct device by bus and name (both strings)''' def __init__(self): super(LxDeviceFindByBusName, self).__init__('lx_device_find_by_bus_name') def invoke(self, bus, name): name = name.string() bus = get_bus_by_name(bus.string()) for dev in bus_for_each_device(bus): if dev_name(dev) == name: return dev class LxDeviceFindByClassName(gdb.Function): '''Find struct device by class and name (both strings)''' def __init__(self): super(LxDeviceFindByClassName, self).__init__('lx_device_find_by_class_name') def invoke(self, cls, name): name = name.string() cls = get_class_by_name(cls.string()) for dev in class_for_each_device(cls): if dev_name(dev) == name: return dev LxDeviceListBus() LxDeviceListClass() LxDeviceListTree() LxDeviceFindByBusName() LxDeviceFindByClassName()
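# In-gdb usage sketch (bus/class/device names are examples; availability
# depends on the target kernel):
#
#   (gdb) lx-device-list-bus usb
#   (gdb) lx-device-list-class block
#   (gdb) lx-device-list-tree &platform_bus
#   (gdb) p $lx_device_find_by_class_name("net", "eth0")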
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/device.py
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) NXP 2019

import gdb
import sys

from linux import utils, lists, constants

clk_core_type = utils.CachedType("struct clk_core")


def clk_core_for_each_child(hlist_head):
    return lists.hlist_for_each_entry(hlist_head,
            clk_core_type.get_type().pointer(), "child_node")


class LxClkSummary(gdb.Command):
    """Print clk tree summary

Output is a subset of /sys/kernel/debug/clk/clk_summary

No calls are made during printing, instead a (c) is printed after values
which are cached and potentially out of date"""

    def __init__(self):
        super(LxClkSummary, self).__init__("lx-clk-summary", gdb.COMMAND_DATA)

    def show_subtree(self, clk, level):
        gdb.write("%*s%-*s %7d %8d %8d %11lu%s\n" % (
                level * 3 + 1, "",
                30 - level * 3,
                clk['name'].string(),
                clk['enable_count'],
                clk['prepare_count'],
                clk['protect_count'],
                clk['rate'],
                '(c)' if clk['flags'] & constants.LX_CLK_GET_RATE_NOCACHE else '   '))

        for child in clk_core_for_each_child(clk['children']):
            self.show_subtree(child, level + 1)

    def invoke(self, arg, from_tty):
        if utils.gdb_eval_or_none("clk_root_list") is None:
            raise gdb.GdbError("No clocks registered")
        gdb.write("                                 enable  prepare  protect\n")
        gdb.write("   clock                          count    count    count        rate\n")
        gdb.write("------------------------------------------------------------------------\n")
        for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_root_list")):
            self.show_subtree(clk, 0)
        for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_orphan_list")):
            self.show_subtree(clk, 0)


LxClkSummary()


class LxClkCoreLookup(gdb.Function):
    """Find struct clk_core by name"""

    def __init__(self):
        super(LxClkCoreLookup, self).__init__("lx_clk_core_lookup")

    def lookup_hlist(self, hlist_head, name):
        for child in clk_core_for_each_child(hlist_head):
            if child['name'].string() == name:
                return child
            result = self.lookup_hlist(child['children'], name)
            if result:
                return result

    def invoke(self, name):
        name = name.string()
        return (self.lookup_hlist(gdb.parse_and_eval("clk_root_list"), name) or
                self.lookup_hlist(gdb.parse_and_eval("clk_orphan_list"), name))


LxClkCoreLookup()
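# Illustration (not part of clk.py): the depth-indented recursion pattern of
# show_subtree(), applied to a plain nested tuple tree (clock names are made
# up).
def show(node, level=0):
    name, children = node
    print("%*s%s" % (level * 3 + 1, "", name))
    for child in children:
        show(child, level + 1)

show(("osc24M", [("pll-cpu", [("cpu", [])]), ("pll-periph", [])]))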
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/clk.py
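A hedged usage sketch for the clk helpers, assuming clk.py has been sourced into gdb; the clock name "osc24M" is made up for illustration.

import gdb

gdb.execute("lx-clk-summary")   # same as typing lx-clk-summary at the prompt
core = gdb.parse_and_eval('$lx_clk_core_lookup("osc24M")')
if core:
    gdb.write("cached rate: {}\n".format(core['rate']))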
#
# gdb helper commands and functions for Linux kernel debugging
#
#  per-cpu tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
#  Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#

import gdb

from linux import tasks, utils


task_type = utils.CachedType("struct task_struct")


MAX_CPUS = 4096


def get_current_cpu():
    if utils.get_gdbserver_type() == utils.GDBSERVER_QEMU:
        return gdb.selected_thread().num - 1
    elif utils.get_gdbserver_type() == utils.GDBSERVER_KGDB:
        tid = gdb.selected_thread().ptid[2]
        if tid > (0x100000000 - MAX_CPUS - 2):
            return 0x100000000 - tid - 2
        else:
            return tasks.get_thread_info(tasks.get_task_by_pid(tid))['cpu']
    else:
        raise gdb.GdbError("Sorry, obtaining the current CPU is not yet "
                           "supported with this gdb server.")


def per_cpu(var_ptr, cpu):
    if cpu == -1:
        cpu = get_current_cpu()
    if utils.is_target_arch("sparc:v9"):
        offset = gdb.parse_and_eval(
            "trap_block[{0}].__per_cpu_base".format(str(cpu)))
    else:
        try:
            offset = gdb.parse_and_eval(
                "__per_cpu_offset[{0}]".format(str(cpu)))
        except gdb.error:
            # !CONFIG_SMP case
            offset = 0
    pointer = var_ptr.cast(utils.get_long_type()) + offset
    return pointer.cast(var_ptr.type).dereference()


cpu_mask = {}


def cpu_mask_invalidate(event):
    global cpu_mask
    cpu_mask = {}
    gdb.events.stop.disconnect(cpu_mask_invalidate)
    if hasattr(gdb.events, 'new_objfile'):
        gdb.events.new_objfile.disconnect(cpu_mask_invalidate)


def cpu_list(mask_name):
    global cpu_mask
    mask = None
    if mask_name in cpu_mask:
        mask = cpu_mask[mask_name]
    if mask is None:
        mask = gdb.parse_and_eval(mask_name + ".bits")
        if hasattr(gdb, 'events'):
            cpu_mask[mask_name] = mask
            gdb.events.stop.connect(cpu_mask_invalidate)
            if hasattr(gdb.events, 'new_objfile'):
                gdb.events.new_objfile.connect(cpu_mask_invalidate)

    bits_per_entry = mask[0].type.sizeof * 8
    # use integer division; plain '/' would yield a float under Python 3
    num_entries = mask.type.sizeof * 8 // bits_per_entry
    entry = -1
    bits = 0

    while True:
        while bits == 0:
            entry += 1
            if entry == num_entries:
                return
            bits = mask[entry]
            if bits != 0:
                bit = 0
                break

        while bits & 1 == 0:
            bits >>= 1
            bit += 1

        cpu = entry * bits_per_entry + bit

        bits >>= 1
        bit += 1

        yield int(cpu)


def each_online_cpu():
    for cpu in cpu_list("__cpu_online_mask"):
        yield cpu


def each_present_cpu():
    for cpu in cpu_list("__cpu_present_mask"):
        yield cpu


def each_possible_cpu():
    for cpu in cpu_list("__cpu_possible_mask"):
        yield cpu


def each_active_cpu():
    for cpu in cpu_list("__cpu_active_mask"):
        yield cpu


class LxCpus(gdb.Command):
    """List CPU status arrays

Displays the known state of each CPU based on the kernel masks
and can help identify the state of hotplugged CPUs"""

    def __init__(self):
        super(LxCpus, self).__init__("lx-cpus", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        gdb.write("Possible CPUs : {}\n".format(list(each_possible_cpu())))
        gdb.write("Present CPUs : {}\n".format(list(each_present_cpu())))
        gdb.write("Online CPUs : {}\n".format(list(each_online_cpu())))
        gdb.write("Active CPUs : {}\n".format(list(each_active_cpu())))


LxCpus()


class PerCpu(gdb.Function):
    """Return per-cpu variable.

$lx_per_cpu("VAR"[, CPU]): Return the per-cpu variable called VAR for the
given CPU number. If CPU is omitted, the CPU of the current context is used.
Note that VAR has to be quoted as string."""

    def __init__(self):
        super(PerCpu, self).__init__("lx_per_cpu")

    def invoke(self, var_name, cpu=-1):
        var_ptr = gdb.parse_and_eval("&" + var_name.string())
        return per_cpu(var_ptr, cpu)


PerCpu()


def get_current_task(cpu):
    task_ptr_type = task_type.get_type().pointer()

    if utils.is_target_arch("x86"):
        var_ptr = gdb.parse_and_eval("&current_task")
        return per_cpu(var_ptr, cpu).dereference()
    elif utils.is_target_arch("aarch64"):
        current_task_addr = gdb.parse_and_eval("$SP_EL0")
        if (current_task_addr >> 63) != 0:
            current_task = current_task_addr.cast(task_ptr_type)
            return current_task.dereference()
        else:
            raise gdb.GdbError("Sorry, obtaining the current task is not allowed "
                               "while running in userspace(EL0)")
    else:
        raise gdb.GdbError("Sorry, obtaining the current task is not yet "
                           "supported with this arch")


class LxCurrentFunc(gdb.Function):
    """Return current task.

$lx_current([CPU]): Return the per-cpu task variable for the given CPU
number. If CPU is omitted, the CPU of the current context is used."""

    def __init__(self):
        super(LxCurrentFunc, self).__init__("lx_current")

    def invoke(self, cpu=-1):
        return get_current_task(cpu)


LxCurrentFunc()
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/cpus.py
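The per_cpu() helper above implements the per-CPU lookup by adding the CPU's entry of __per_cpu_offset to the variable's address and casting back to the original type. A sketch of driving it from gdb's Python prompt; it assumes the scripts are loaded and that the target kernel exports the per-CPU symbol "runqueues" (as mainline does):

import gdb
from linux import cpus

for cpu in cpus.each_online_cpu():
    # equivalent to: (gdb) p $lx_per_cpu("runqueues").nr_running
    rq = cpus.per_cpu(gdb.parse_and_eval("&runqueues"), cpu)
    gdb.write("cpu{}: nr_running={}\n".format(cpu, rq['nr_running']))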
# SPDX-License-Identifier: GPL-2.0
#
# Copyright 2019 Google LLC.

import binascii

import gdb

from linux import constants
from linux import cpus
from linux import rbtree
from linux import utils

timerqueue_node_type = utils.CachedType("struct timerqueue_node").get_type()
hrtimer_type = utils.CachedType("struct hrtimer").get_type()


def ktime_get():
    """Returns the current time, but not very accurately

    We can't read the hardware timer itself to add any nanoseconds
    that need to be added since we last stored the time in the
    timekeeper. But this is probably good enough for debug purposes."""
    tk_core = gdb.parse_and_eval("&tk_core")

    return tk_core['timekeeper']['tkr_mono']['base']


def print_timer(rb_node, idx):
    timerqueue = utils.container_of(rb_node, timerqueue_node_type.pointer(),
                                    "node")
    timer = utils.container_of(timerqueue, hrtimer_type.pointer(), "node")

    function = str(timer['function']).split(" ")[1].strip("<>")
    softexpires = timer['_softexpires']
    expires = timer['node']['expires']
    now = ktime_get()

    text = " #{}: <{}>, {}, ".format(idx, timer, function)
    text += "S:{:02x}\n".format(int(timer['state']))
    text += " # expires at {}-{} nsecs [in {} to {} nsecs]\n".format(
            softexpires, expires, softexpires - now, expires - now)
    return text


def print_active_timers(base):
    curr = base['active']['next']['node']
    curr = curr.address.cast(rbtree.rb_node_type.get_type().pointer())
    idx = 0
    while curr:
        yield print_timer(curr, idx)
        curr = rbtree.rb_next(curr)
        idx += 1


def print_base(base):
    text = " .base: {}\n".format(base.address)
    text += " .index: {}\n".format(base['index'])

    text += " .resolution: {} nsecs\n".format(constants.LX_hrtimer_resolution)

    text += " .get_time: {}\n".format(base['get_time'])
    if constants.LX_CONFIG_HIGH_RES_TIMERS:
        text += " .offset: {} nsecs\n".format(base['offset'])
    text += "active timers:\n"
    text += "".join([x for x in print_active_timers(base)])
    return text


def print_cpu(hrtimer_bases, cpu, max_clock_bases):
    cpu_base = cpus.per_cpu(hrtimer_bases, cpu)
    jiffies = gdb.parse_and_eval("jiffies_64")
    tick_sched_ptr = gdb.parse_and_eval("&tick_cpu_sched")
    ts = cpus.per_cpu(tick_sched_ptr, cpu)

    text = "cpu: {}\n".format(cpu)
    for i in range(max_clock_bases):
        text += " clock {}:\n".format(i)
        text += print_base(cpu_base['clock_base'][i])

    if constants.LX_CONFIG_HIGH_RES_TIMERS:
        fmts = [(" .{} : {} nsecs", 'expires_next'),
                (" .{} : {}", 'hres_active'),
                (" .{} : {}", 'nr_events'),
                (" .{} : {}", 'nr_retries'),
                (" .{} : {}", 'nr_hangs'),
                (" .{} : {}", 'max_hang_time')]
        text += "\n".join([s.format(f, cpu_base[f]) for s, f in fmts])
        text += "\n"

    if constants.LX_CONFIG_TICK_ONESHOT:
        fmts = [(" .{} : {}", 'nohz_mode'),
                (" .{} : {} nsecs", 'last_tick'),
                (" .{} : {}", 'tick_stopped'),
                (" .{} : {}", 'idle_jiffies'),
                (" .{} : {}", 'idle_calls'),
                (" .{} : {}", 'idle_sleeps'),
                (" .{} : {} nsecs", 'idle_entrytime'),
                (" .{} : {} nsecs", 'idle_waketime'),
                (" .{} : {} nsecs", 'idle_exittime'),
                (" .{} : {} nsecs", 'idle_sleeptime'),
                (" .{}: {} nsecs", 'iowait_sleeptime'),
                (" .{} : {}", 'last_jiffies'),
                (" .{} : {}", 'next_timer'),
                (" .{} : {} nsecs", 'idle_expires')]
        text += "\n".join([s.format(f, ts[f]) for s, f in fmts])
        text += "\njiffies: {}\n".format(jiffies)

    text += "\n"

    return text


def print_tickdevice(td, cpu):
    dev = td['evtdev']
    text = "Tick Device: mode: {}\n".format(td['mode'])
    if cpu < 0:
        text += "Broadcast device\n"
    else:
        text += "Per CPU device: {}\n".format(cpu)

    text += "Clock Event Device: "
    if dev == 0:
        text += "<NULL>\n"
        return text

    text += "{}\n".format(dev['name'])
    text += " max_delta_ns: {}\n".format(dev['max_delta_ns'])
    text += " min_delta_ns: {}\n".format(dev['min_delta_ns'])
    text += " mult: {}\n".format(dev['mult'])
    text += " shift: {}\n".format(dev['shift'])
    text += " mode: {}\n".format(dev['state_use_accessors'])
    text += " next_event: {} nsecs\n".format(dev['next_event'])

    text += " set_next_event: {}\n".format(dev['set_next_event'])

    members = [('set_state_shutdown', " shutdown: {}\n"),
               ('set_state_periodic', " periodic: {}\n"),
               ('set_state_oneshot', " oneshot: {}\n"),
               ('set_state_oneshot_stopped', " oneshot stopped: {}\n"),
               ('tick_resume', " resume: {}\n")]
    for member, fmt in members:
        if dev[member]:
            text += fmt.format(dev[member])

    text += " event_handler: {}\n".format(dev['event_handler'])
    text += " retries: {}\n".format(dev['retries'])

    return text


def pr_cpumask(mask):
    nr_cpu_ids = 1
    if constants.LX_NR_CPUS > 1:
        nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")

    inf = gdb.inferiors()[0]
    bits = mask['bits']
    # integer division; '/' would produce a float under Python 3 and break
    # the read_memoryview() length argument below
    num_bytes = (nr_cpu_ids + 7) // 8
    buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
    buf = binascii.b2a_hex(buf)
    if type(buf) is not str:
        buf = buf.decode()

    chunks = []
    i = num_bytes
    while i > 0:
        i -= 1
        start = i * 2
        end = start + 2
        chunks.append(buf[start:end])
        if i != 0 and i % 4 == 0:
            chunks.append(',')

    extra = nr_cpu_ids % 8
    if 0 < extra <= 4:
        chunks[0] = chunks[0][0]  # Cut off the first 0

    return "".join(chunks)


class LxTimerList(gdb.Command):
    """Print /proc/timer_list"""

    def __init__(self):
        super(LxTimerList, self).__init__("lx-timerlist", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        hrtimer_bases = gdb.parse_and_eval("&hrtimer_bases")
        max_clock_bases = gdb.parse_and_eval("HRTIMER_MAX_CLOCK_BASES")

        text = "Timer List Version: gdb scripts\n"
        text += "HRTIMER_MAX_CLOCK_BASES: {}\n".format(max_clock_bases)
        text += "now at {} nsecs\n".format(ktime_get())

        for cpu in cpus.each_online_cpu():
            text += print_cpu(hrtimer_bases, cpu, max_clock_bases)

        if constants.LX_CONFIG_GENERIC_CLOCKEVENTS:
            if constants.LX_CONFIG_GENERIC_CLOCKEVENTS_BROADCAST:
                bc_dev = gdb.parse_and_eval("&tick_broadcast_device")
                text += print_tickdevice(bc_dev, -1)
                text += "\n"
                mask = gdb.parse_and_eval("tick_broadcast_mask")
                mask = pr_cpumask(mask)
                text += "tick_broadcast_mask: {}\n".format(mask)
                if constants.LX_CONFIG_TICK_ONESHOT:
                    mask = gdb.parse_and_eval("tick_broadcast_oneshot_mask")
                    mask = pr_cpumask(mask)
                    text += "tick_broadcast_oneshot_mask: {}\n".format(mask)
                text += "\n"

            tick_cpu_devices = gdb.parse_and_eval("&tick_cpu_device")
            for cpu in cpus.each_online_cpu():
                tick_dev = cpus.per_cpu(tick_cpu_devices, cpu)
                text += print_tickdevice(tick_dev, cpu)
                text += "\n"

        gdb.write(text)


LxTimerList()
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/timerlist.py
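A minimal sketch of using the module above from gdb's Python prompt; it assumes the linux helper package is importable, as it is after sourcing vmlinux-gdb.py:

import gdb
from linux import timerlist

gdb.write("now: {} nsecs\n".format(timerlist.ktime_get()))
gdb.execute("lx-timerlist")   # prints roughly what /proc/timer_list shows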
# nothing to do for the initialization of this package
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/__init__.py
#
# gdb helper commands and functions for Linux kernel debugging
#
#  load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
#  Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#

import gdb
import os
import re

from linux import modules, utils


if hasattr(gdb, 'Breakpoint'):
    class LoadModuleBreakpoint(gdb.Breakpoint):
        def __init__(self, spec, gdb_command):
            super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
            self.silent = True
            self.gdb_command = gdb_command

        def stop(self):
            module = gdb.parse_and_eval("mod")
            module_name = module['name'].string()
            cmd = self.gdb_command

            # enforce update if object file is not found
            cmd.module_files_updated = False

            # Disable pagination while reporting symbol (re-)loading.
            # The console input is blocked in this context so that we would
            # get stuck waiting for the user to acknowledge paged output.
            show_pagination = gdb.execute("show pagination", to_string=True)
            pagination = show_pagination.endswith("on.\n")
            gdb.execute("set pagination off")

            if module_name in cmd.loaded_modules:
                gdb.write("refreshing all symbols to reload module "
                          "'{0}'\n".format(module_name))
                cmd.load_all_symbols()
            else:
                cmd.load_module_symbols(module)

            # restore pagination state
            gdb.execute("set pagination %s" % ("on" if pagination else "off"))

            return False


class LxSymbols(gdb.Command):
    """(Re-)load symbols of Linux kernel and currently loaded modules.

The kernel (vmlinux) is taken from the current working directory. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""

    module_paths = []
    module_files = []
    module_files_updated = False
    loaded_modules = []
    breakpoint = None

    def __init__(self):
        super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
                                        gdb.COMPLETE_FILENAME)

    def _update_module_files(self):
        self.module_files = []
        for path in self.module_paths:
            gdb.write("scanning for modules in {0}\n".format(path))
            for root, dirs, files in os.walk(path):
                for name in files:
                    if name.endswith(".ko") or name.endswith(".ko.debug"):
                        self.module_files.append(root + "/" + name)
        self.module_files_updated = True

    def _get_module_file(self, module_name):
        # raw string so that '\.' is a literal dot in the regex rather than
        # an invalid string escape
        module_pattern = r".*/{0}\.ko(?:.debug)?$".format(
            module_name.replace("_", r"[_\-]"))
        for name in self.module_files:
            if re.match(module_pattern, name) and os.path.exists(name):
                return name
        return None

    def _section_arguments(self, module):
        try:
            sect_attrs = module['sect_attrs'].dereference()
        except gdb.error:
            return ""
        attrs = sect_attrs['attrs']
        section_name_to_address = {
            attrs[n]['battr']['attr']['name'].string(): attrs[n]['address']
            for n in range(int(sect_attrs['nsections']))}
        args = []
        for section_name in [".data", ".data..read_mostly", ".rodata",
                             ".bss", ".text", ".text.hot", ".text.unlikely"]:
            address = section_name_to_address.get(section_name)
            if address:
                args.append(" -s {name} {addr}".format(
                    name=section_name, addr=str(address)))
        return "".join(args)

    def load_module_symbols(self, module):
        module_name = module['name'].string()
        module_addr = str(module['core_layout']['base']).split()[0]

        module_file = self._get_module_file(module_name)
        if not module_file and not self.module_files_updated:
            self._update_module_files()
            module_file = self._get_module_file(module_name)

        if module_file:
            if utils.is_target_arch('s390'):
                # Module text is preceded by PLT stubs on s390.
                module_arch = module['arch']
                plt_offset = int(module_arch['plt_offset'])
                plt_size = int(module_arch['plt_size'])
                module_addr = hex(int(module_addr, 0) + plt_offset + plt_size)
            gdb.write("loading @{addr}: {filename}\n".format(
                addr=module_addr, filename=module_file))
            cmdline = "add-symbol-file {filename} {addr}{sections}".format(
                filename=module_file,
                addr=module_addr,
                sections=self._section_arguments(module))
            gdb.execute(cmdline, to_string=True)
            if module_name not in self.loaded_modules:
                self.loaded_modules.append(module_name)
        else:
            gdb.write("no module object found for '{0}'\n".format(module_name))

    def load_all_symbols(self):
        gdb.write("loading vmlinux\n")

        # Dropping symbols will disable all breakpoints. So save their states
        # and restore them afterward.
        saved_states = []
        if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
            for bp in gdb.breakpoints():
                saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})

        # drop all current symbols and reload vmlinux
        orig_vmlinux = 'vmlinux'
        for obj in gdb.objfiles():
            if (obj.filename.endswith('vmlinux') or
                obj.filename.endswith('vmlinux.debug')):
                orig_vmlinux = obj.filename
        gdb.execute("symbol-file", to_string=True)
        gdb.execute("symbol-file {0}".format(orig_vmlinux))

        self.loaded_modules = []
        module_list = modules.module_list()
        if not module_list:
            gdb.write("no modules found\n")
        else:
            [self.load_module_symbols(module) for module in module_list]

        for saved_state in saved_states:
            saved_state['breakpoint'].enabled = saved_state['enabled']

    def invoke(self, arg, from_tty):
        self.module_paths = [os.path.abspath(os.path.expanduser(p))
                             for p in arg.split()]
        self.module_paths.append(os.getcwd())

        # enforce update
        self.module_files = []
        self.module_files_updated = False

        self.load_all_symbols()

        if hasattr(gdb, 'Breakpoint'):
            if self.breakpoint is not None:
                self.breakpoint.delete()
                self.breakpoint = None
            self.breakpoint = LoadModuleBreakpoint(
                "kernel/module/main.c:do_init_module", self)
        else:
            gdb.write("Note: symbol update on module loading not supported "
                      "with this gdb version\n")


LxSymbols()
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/symbols.py
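Typical use, as a hypothetical session (the module paths are examples only): after attaching to the target, run lx-symbols once with any extra .ko search directories; the internal breakpoint on do_init_module then keeps module symbols current as further modules load.

import gdb

# Equivalent to typing at the prompt:
#   (gdb) lx-symbols /lib/modules/6.1.0/extra /home/user/out-of-tree
gdb.execute("lx-symbols /lib/modules/6.1.0/extra /home/user/out-of-tree")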
#
# gdb helper commands and functions for Linux kernel debugging
#
#  common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
#  Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#

import gdb


class CachedType:
    def __init__(self, name):
        self._type = None
        self._name = name

    def _new_objfile_handler(self, event):
        self._type = None
        gdb.events.new_objfile.disconnect(self._new_objfile_handler)

    def get_type(self):
        if self._type is None:
            self._type = gdb.lookup_type(self._name)
            if self._type is None:
                raise gdb.GdbError(
                    "cannot resolve type '{0}'".format(self._name))
            if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
                gdb.events.new_objfile.connect(self._new_objfile_handler)
        return self._type


long_type = CachedType("long")
atomic_long_type = CachedType("atomic_long_t")


def get_long_type():
    global long_type
    return long_type.get_type()


def offset_of(typeobj, field):
    element = gdb.Value(0).cast(typeobj)
    return int(str(element[field].address).split()[0], 16)


def container_of(ptr, typeobj, member):
    return (ptr.cast(get_long_type()) -
            offset_of(typeobj, member)).cast(typeobj)


class ContainerOf(gdb.Function):
    """Return pointer to containing data structure.

$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
data structure of the type TYPE in which PTR is the address of ELEMENT.
Note that TYPE and ELEMENT have to be quoted as strings."""

    def __init__(self):
        super(ContainerOf, self).__init__("container_of")

    def invoke(self, ptr, typename, elementname):
        return container_of(ptr,
                            gdb.lookup_type(typename.string()).pointer(),
                            elementname.string())


ContainerOf()


BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
target_endianness = None


def get_target_endianness():
    global target_endianness
    if target_endianness is None:
        endian = gdb.execute("show endian", to_string=True)
        if "little endian" in endian:
            target_endianness = LITTLE_ENDIAN
        elif "big endian" in endian:
            target_endianness = BIG_ENDIAN
        else:
            raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
    return target_endianness


def read_memoryview(inf, start, length):
    m = inf.read_memory(start, length)
    if type(m) is memoryview:
        return m
    return memoryview(m)


def read_u16(buffer, offset):
    buffer_val = buffer[offset:offset + 2]
    value = [0, 0]

    if type(buffer_val[0]) is str:
        value[0] = ord(buffer_val[0])
        value[1] = ord(buffer_val[1])
    else:
        value[0] = buffer_val[0]
        value[1] = buffer_val[1]

    if get_target_endianness() == LITTLE_ENDIAN:
        return value[0] + (value[1] << 8)
    else:
        return value[1] + (value[0] << 8)


def read_u32(buffer, offset):
    if get_target_endianness() == LITTLE_ENDIAN:
        return read_u16(buffer, offset) + (read_u16(buffer, offset + 2) << 16)
    else:
        return read_u16(buffer, offset + 2) + (read_u16(buffer, offset) << 16)


def read_u64(buffer, offset):
    if get_target_endianness() == LITTLE_ENDIAN:
        return read_u32(buffer, offset) + (read_u32(buffer, offset + 4) << 32)
    else:
        return read_u32(buffer, offset + 4) + (read_u32(buffer, offset) << 32)


def read_ulong(buffer, offset):
    if get_long_type().sizeof == 8:
        return read_u64(buffer, offset)
    else:
        return read_u32(buffer, offset)


atomic_long_counter_offset = atomic_long_type.get_type()['counter'].bitpos
atomic_long_counter_sizeof = atomic_long_type.get_type()['counter'].type.sizeof


def read_atomic_long(buffer, offset):
    global atomic_long_counter_offset
    global atomic_long_counter_sizeof

    if atomic_long_counter_sizeof == 8:
        return read_u64(buffer, offset + atomic_long_counter_offset)
    else:
        return read_u32(buffer, offset + atomic_long_counter_offset)


target_arch = None


def is_target_arch(arch):
    if hasattr(gdb.Frame, 'architecture'):
        return arch in gdb.newest_frame().architecture().name()
    else:
        global target_arch
        if target_arch is None:
            target_arch = gdb.execute("show architecture", to_string=True)
        return arch in target_arch


GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
gdbserver_type = None


def get_gdbserver_type():
    def exit_handler(event):
        global gdbserver_type
        gdbserver_type = None
        gdb.events.exited.disconnect(exit_handler)

    def probe_qemu():
        try:
            return gdb.execute("monitor info version", to_string=True) != ""
        except gdb.error:
            return False

    def probe_kgdb():
        try:
            thread_info = gdb.execute("info thread 2", to_string=True)
            return "shadowCPU0" in thread_info
        except gdb.error:
            return False

    global gdbserver_type
    if gdbserver_type is None:
        if probe_qemu():
            gdbserver_type = GDBSERVER_QEMU
        elif probe_kgdb():
            gdbserver_type = GDBSERVER_KGDB
        if gdbserver_type is not None and hasattr(gdb, 'events'):
            gdb.events.exited.connect(exit_handler)
    return gdbserver_type


def gdb_eval_or_none(expression):
    try:
        return gdb.parse_and_eval(expression)
    except gdb.error:
        return None


def dentry_name(d):
    parent = d['d_parent']
    if parent == d or parent == 0:
        return ""
    p = dentry_name(d['d_parent']) + "/"
    return p + d['d_iname'].string()
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/utils.py
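container_of() above mirrors the kernel's C macro: subtract the member's offset from the member's address, then cast back to the containing type. A small sketch reusing the kernel's global module list (assumes a live kernel target with symbols loaded):

import gdb
from linux import utils

# 'modules' is the kernel's global list_head; its .next points at the
# 'list' member embedded in the first struct module on the list.
lh = gdb.parse_and_eval("modules.next")
mod = utils.container_of(lh, gdb.lookup_type("struct module").pointer(), "list")
gdb.write("first module: {}\n".format(mod['name'].string()))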
#
# gdb helper commands and functions for Linux kernel debugging
#
#  module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
#  Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#

import gdb

from linux import cpus, utils, lists


module_type = utils.CachedType("struct module")


def module_list():
    global module_type
    modules = utils.gdb_eval_or_none("modules")
    if modules is None:
        return

    module_ptr_type = module_type.get_type().pointer()

    for module in lists.list_for_each_entry(modules, module_ptr_type, "list"):
        yield module


def find_module_by_name(name):
    for module in module_list():
        if module['name'].string() == name:
            return module
    return None


class LxModule(gdb.Function):
    """Find module by name and return the module variable.

$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""

    def __init__(self):
        super(LxModule, self).__init__("lx_module")

    def invoke(self, mod_name):
        mod_name = mod_name.string()
        module = find_module_by_name(mod_name)
        if module:
            return module.dereference()
        else:
            raise gdb.GdbError("Unable to find MODULE " + mod_name)


LxModule()


class LxLsmod(gdb.Command):
    """List currently loaded modules."""

    _module_use_type = utils.CachedType("struct module_use")

    def __init__(self):
        super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        gdb.write(
            "Address{0} Module Size Used by\n".format(
                " " if utils.get_long_type().sizeof == 8 else ""))

        for module in module_list():
            layout = module['core_layout']
            gdb.write("{address} {name:<19} {size:>8} {ref}".format(
                address=str(layout['base']).split()[0],
                name=module['name'].string(),
                size=str(layout['size']),
                ref=str(module['refcnt']['counter'] - 1)))

            t = self._module_use_type.get_type().pointer()
            first = True
            sources = module['source_list']
            for use in lists.list_for_each_entry(sources, t, "source_list"):
                gdb.write("{separator}{name}".format(
                    separator=" " if first else ",",
                    name=use['source']['name'].string()))
                first = False

            gdb.write("\n")


LxLsmod()
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/modules.py
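A hedged example of the two entry points above; "ext4" is just an example module name:

import gdb
from linux import modules

# CLI equivalents:  (gdb) lx-lsmod   and   (gdb) p $lx_module("ext4")
gdb.execute("lx-lsmod")
mod = modules.find_module_by_name("ext4")
if mod is not None:
    gdb.write("ext4 core at {}\n".format(mod['core_layout']['base']))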
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) NXP 2019

import gdb
import sys

from linux.utils import CachedType, gdb_eval_or_none
from linux.lists import list_for_each_entry

generic_pm_domain_type = CachedType('struct generic_pm_domain')
pm_domain_data_type = CachedType('struct pm_domain_data')
device_link_type = CachedType('struct device_link')


def kobject_get_path(kobj):
    path = kobj['name'].string()
    parent = kobj['parent']
    if parent:
        path = kobject_get_path(parent) + '/' + path
    return path


def rtpm_status_str(dev):
    if dev['power']['runtime_error']:
        return 'error'
    if dev['power']['disable_depth']:
        return 'unsupported'
    _RPM_STATUS_LOOKUP = [
        "active",
        "resuming",
        "suspended",
        "suspending"
    ]
    return _RPM_STATUS_LOOKUP[dev['power']['runtime_status']]


class LxGenPDSummary(gdb.Command):
    '''Print genpd summary

Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''

    def __init__(self):
        super(LxGenPDSummary, self).__init__('lx-genpd-summary', gdb.COMMAND_DATA)

    def summary_one(self, genpd):
        if genpd['status'] == 0:
            status_string = 'on'
        else:
            status_string = 'off-{}'.format(genpd['state_idx'])

        child_names = []
        for link in list_for_each_entry(
                genpd['parent_links'],
                device_link_type.get_type().pointer(),
                'parent_node'):
            child_names.append(link['child']['name'])

        gdb.write('%-30s %-15s %s\n' % (
                genpd['name'].string(),
                status_string,
                ', '.join(child_names)))

        # Print devices in domain
        for pm_data in list_for_each_entry(genpd['dev_list'],
                                           pm_domain_data_type.get_type().pointer(),
                                           'list_node'):
            dev = pm_data['dev']
            kobj_path = kobject_get_path(dev['kobj'])
            gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))

    def invoke(self, arg, from_tty):
        if gdb_eval_or_none("&gpd_list") is None:
            raise gdb.GdbError("No power domain(s) registered")
        gdb.write('domain status children\n')
        gdb.write(' /device runtime status\n')
        gdb.write('----------------------------------------------------------------------\n')
        for genpd in list_for_each_entry(
                gdb.parse_and_eval('&gpd_list'),
                generic_pm_domain_type.get_type().pointer(),
                'gpd_list_node'):
            self.summary_one(genpd)


LxGenPDSummary()
grace-kernel-grace-kernel-6.1.y
scripts/gdb/linux/genpd.py
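A minimal sketch, assuming the target kernel has generic power domains registered; the output mirrors /sys/kernel/debug/pm_genpd/pm_genpd_summary:

import gdb

gdb.execute("lx-genpd-summary")   # same as typing it at the (gdb) prompt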
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2018 Masahiro Yamada <[email protected]>
#

"""
Kconfig unit testing framework.

This provides fixture functions commonly used from test files.
"""

import os
import pytest
import shutil
import subprocess
import tempfile

CONF_PATH = os.path.abspath(os.path.join('scripts', 'kconfig', 'conf'))


class Conf:
    """Kconfig runner and result checker.

    This class provides methods to run the text-based interface of Kconfig
    (scripts/kconfig/conf) and retrieve the resulting configuration, stdout,
    and stderr. It also provides methods to compare those results with
    expectations.
    """

    def __init__(self, request):
        """Create a new Conf instance.

        request: object to introspect the requesting test module
        """
        # the directory of the test being run
        self._test_dir = os.path.dirname(str(request.fspath))

    # runners
    def _run_conf(self, mode, dot_config=None, out_file='.config',
                  interactive=False, in_keys=None, extra_env={}):
        """Run text-based Kconfig executable and save the result.

        mode: input mode option (--oldaskconfig, --defconfig=<file> etc.)
        dot_config: .config file to use for configuration base
        out_file: file name to contain the output config data
        interactive: flag to specify the interactive mode
        in_keys: key inputs for interactive modes
        extra_env: additional environments
        returncode: exit status of the Kconfig executable
        """
        command = [CONF_PATH, mode, 'Kconfig']

        # Override 'srctree' environment to make the test as the top directory
        extra_env['srctree'] = self._test_dir

        # Clear KCONFIG_DEFCONFIG_LIST to keep unit tests from being affected
        # by the user's environment.
        extra_env['KCONFIG_DEFCONFIG_LIST'] = ''

        # Run Kconfig in a temporary directory.
        # This directory is automatically removed when done.
        with tempfile.TemporaryDirectory() as temp_dir:

            # if .config is given, copy it to the working directory
            if dot_config:
                shutil.copyfile(os.path.join(self._test_dir, dot_config),
                                os.path.join(temp_dir, '.config'))

            ps = subprocess.Popen(command,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  cwd=temp_dir,
                                  env=dict(os.environ, **extra_env))

            # If input key sequence is given, feed it to stdin.
            if in_keys:
                ps.stdin.write(in_keys.encode('utf-8'))

            while ps.poll() is None:
                # For interactive modes such as oldaskconfig, oldconfig,
                # send 'Enter' key until the program finishes.
                if interactive:
                    ps.stdin.write(b'\n')

            self.retcode = ps.returncode
            self.stdout = ps.stdout.read().decode()
            self.stderr = ps.stderr.read().decode()

            # Retrieve the resulting config data only when .config is supposed
            # to exist. If the command fails, the .config does not exist.
            # 'listnewconfig' does not produce .config in the first place.
            if self.retcode == 0 and out_file:
                with open(os.path.join(temp_dir, out_file)) as f:
                    self.config = f.read()
            else:
                self.config = None

        # Logging:
        # Pytest captures the following information by default. In failure
        # of tests, the captured log will be displayed. This will be useful
        # to figure out what has happened.

        print("[command]\n{}\n".format(' '.join(command)))
        print("[retcode]\n{}\n".format(self.retcode))
        print("[stdout]")
        print(self.stdout)
        print("[stderr]")
        print(self.stderr)

        if self.config is not None:
            print("[output for '{}']".format(out_file))
            print(self.config)

        return self.retcode

    def oldaskconfig(self, dot_config=None, in_keys=None):
        """Run oldaskconfig.

        dot_config: .config file to use for configuration base (optional)
        in_keys: key inputs (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._run_conf('--oldaskconfig', dot_config=dot_config,
                              interactive=True, in_keys=in_keys)

    def oldconfig(self, dot_config=None, in_keys=None):
        """Run oldconfig.

        dot_config: .config file to use for configuration base (optional)
        in_keys: key inputs (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._run_conf('--oldconfig', dot_config=dot_config,
                              interactive=True, in_keys=in_keys)

    def olddefconfig(self, dot_config=None):
        """Run olddefconfig.

        dot_config: .config file to use for configuration base (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._run_conf('--olddefconfig', dot_config=dot_config)

    def defconfig(self, defconfig):
        """Run defconfig.

        defconfig: defconfig file for input
        returncode: exit status of the Kconfig executable
        """
        defconfig_path = os.path.join(self._test_dir, defconfig)
        return self._run_conf('--defconfig={}'.format(defconfig_path))

    def _allconfig(self, mode, all_config):
        if all_config:
            all_config_path = os.path.join(self._test_dir, all_config)
            extra_env = {'KCONFIG_ALLCONFIG': all_config_path}
        else:
            extra_env = {}

        return self._run_conf('--{}config'.format(mode), extra_env=extra_env)

    def allyesconfig(self, all_config=None):
        """Run allyesconfig.

        all_config: fragment config file for KCONFIG_ALLCONFIG (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._allconfig('allyes', all_config)

    def allmodconfig(self, all_config=None):
        """Run allmodconfig.

        all_config: fragment config file for KCONFIG_ALLCONFIG (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._allconfig('allmod', all_config)

    def allnoconfig(self, all_config=None):
        """Run allnoconfig.

        all_config: fragment config file for KCONFIG_ALLCONFIG (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._allconfig('allno', all_config)

    def alldefconfig(self, all_config=None):
        """Run alldefconfig.

        all_config: fragment config file for KCONFIG_ALLCONFIG (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._allconfig('alldef', all_config)

    def randconfig(self, all_config=None):
        """Run randconfig.

        all_config: fragment config file for KCONFIG_ALLCONFIG (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._allconfig('rand', all_config)

    def savedefconfig(self, dot_config):
        """Run savedefconfig.

        dot_config: .config file for input
        returncode: exit status of the Kconfig executable
        """
        return self._run_conf('--savedefconfig', out_file='defconfig')

    def listnewconfig(self, dot_config=None):
        """Run listnewconfig.

        dot_config: .config file to use for configuration base (optional)
        returncode: exit status of the Kconfig executable
        """
        return self._run_conf('--listnewconfig', dot_config=dot_config,
                              out_file=None)

    # checkers
    def _read_and_compare(self, compare, expected):
        """Compare the result with expectation.

        compare: function to compare the result with expectation
        expected: file that contains the expected data
        """
        with open(os.path.join(self._test_dir, expected)) as f:
            expected_data = f.read()
        return compare(self, expected_data)

    def _contains(self, attr, expected):
        return self._read_and_compare(
            lambda s, e: getattr(s, attr).find(e) >= 0,
            expected)

    def _matches(self, attr, expected):
        return self._read_and_compare(lambda s, e: getattr(s, attr) == e,
                                      expected)

    def config_contains(self, expected):
        """Check if the resulting configuration contains expected data.

        expected: file that contains the expected data
        returncode: True if result contains the expected data, False otherwise
        """
        return self._contains('config', expected)

    def config_matches(self, expected):
        """Check if the resulting configuration exactly matches expected data.

        expected: file that contains the expected data
        returncode: True if result matches the expected data, False otherwise
        """
        return self._matches('config', expected)

    def stdout_contains(self, expected):
        """Check if the resulting stdout contains expected data.

        expected: file that contains the expected data
        returncode: True if result contains the expected data, False otherwise
        """
        return self._contains('stdout', expected)

    def stdout_matches(self, expected):
        """Check if the resulting stdout exactly matches expected data.

        expected: file that contains the expected data
        returncode: True if result matches the expected data, False otherwise
        """
        return self._matches('stdout', expected)

    def stderr_contains(self, expected):
        """Check if the resulting stderr contains expected data.

        expected: file that contains the expected data
        returncode: True if result contains the expected data, False otherwise
        """
        return self._contains('stderr', expected)

    def stderr_matches(self, expected):
        """Check if the resulting stderr exactly matches expected data.

        expected: file that contains the expected data
        returncode: True if result matches the expected data, False otherwise
        """
        return self._matches('stderr', expected)


@pytest.fixture(scope="module")
def conf(request):
    """Create a Conf instance and provide it to test functions."""
    return Conf(request)
grace-kernel-grace-kernel-6.1.y
scripts/kconfig/tests/conftest.py
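For context, a test module consuming the conf fixture looks like the files that follow. This sketch is hypothetical (the 'initial_config' and 'expected_config' file names are made up) but follows the same pattern:

# SPDX-License-Identifier: GPL-2.0
"""
Example: run olddefconfig on a base .config and compare the result.
"""


def test(conf):
    assert conf.olddefconfig(dot_config='initial_config') == 0
    assert conf.config_contains('expected_config')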
# SPDX-License-Identifier: GPL-2.0 """ Do not affect user-assigned choice value by another choice. Handling of state flags for choices is complecated. In old days, the defconfig result of a choice could be affected by another choice if those choices interact by 'depends on', 'select', etc. Related Linux commit: fbe98bb9ed3dae23e320c6b113e35f129538d14a """ def test(conf): assert conf.defconfig('defconfig') == 0 assert conf.config_contains('expected_config')
grace-kernel-grace-kernel-6.1.y
scripts/kconfig/tests/inter_choice/__init__.py
# SPDX-License-Identifier: GPL-2.0 """ Create submenu for symbols that depend on the preceding one. If a symbols has dependency on the preceding symbol, the menu entry should become the submenu of the preceding one, and displayed with deeper indentation. """ def test(conf): assert conf.oldaskconfig() == 0 assert conf.stdout_contains('expected_stdout')
grace-kernel-grace-kernel-6.1.y
scripts/kconfig/tests/auto_submenu/__init__.py
# SPDX-License-Identifier: GPL-2.0 """ Escape sequence tests. """ def test(conf): assert conf.oldaskconfig() == 0 assert conf.stderr_matches('expected_stderr')
grace-kernel-grace-kernel-6.1.y
scripts/kconfig/tests/preprocess/escape/__init__.py
# SPDX-License-Identifier: GPL-2.0 """ Variable and user-defined function tests. """ def test(conf): assert conf.oldaskconfig() == 0 assert conf.stderr_matches('expected_stderr')
grace-kernel-grace-kernel-6.1.y
scripts/kconfig/tests/preprocess/variable/__init__.py