#!/usr/bin/env python
from __future__ import print_function
import argparse
import errno
import functools
import html
import io
from multiprocessing import cpu_count
import os.path
import re
import shutil
import sys
from pygments import highlight
from pygments.lexers.c_cpp import CppLexer
from pygments.formatters import HtmlFormatter
import optpmap
import optrecord
desc = '''Generate HTML output to visualize optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
The tool requires the PyYAML and Pygments Python packages.'''
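# Illustrative invocation (a sketch, not from the original source; the file
# and directory names below are hypothetical):
#
#   clang -O2 -c foo.c -fsave-optimization-record -fdiagnostics-show-hotness
#   opt-viewer.py --output-dir html --source-dir . foo.opt.yaml
#
# This writes one annotated HTML page per source file plus an index.html
# (sorted by hotness when profile data is present) into ./html.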
# This allows passing the global context to the child processes.
class Context:
def __init__(self, caller_loc = dict()):
# Map function names to their source location, for functions where inlining happened
self.caller_loc = caller_loc
context = Context()
def suppress(remark):
if remark.Name == 'sil.Specialized':
return remark.getArgDict()['Function'][0].startswith('\"Swift.')
elif remark.Name == 'sil.Inlined':
return remark.getArgDict()['Callee'][0].startswith(('\"Swift.', '\"specialized Swift.'))
return False
class SourceFileRenderer:
def __init__(self, source_dir, output_dir, filename, no_highlight):
self.filename = filename
existing_filename = None
if os.path.exists(filename):
existing_filename = filename
else:
fn = os.path.join(source_dir, filename)
if os.path.exists(fn):
existing_filename = fn
self.no_highlight = no_highlight
self.stream = io.open(os.path.join(output_dir, optrecord.html_file_name(filename)), 'w', encoding='utf-8')
if existing_filename:
self.source_stream = io.open(existing_filename, encoding='utf-8')
else:
self.source_stream = None
print(u'''
<html>
<h1>Unable to locate file {}</h1>
</html>
'''.format(filename), file=self.stream)
self.html_formatter = HtmlFormatter(encoding='utf-8')
self.cpp_lexer = CppLexer(stripnl=False)
def render_source_lines(self, stream, line_remarks):
file_text = stream.read()
if self.no_highlight:
html_highlighted = file_text
else:
html_highlighted = highlight(
file_text,
self.cpp_lexer,
self.html_formatter)
# Note that the API differs between Python 2 and 3. On Python 3,
# pygments.highlight() returns a bytes object, so we have to decode. On
# Python 2 the output is str, but since we support unicode characters and
# the output stream is unicode, we decode there as well.
html_highlighted = html_highlighted.decode('utf-8')
# Take off the header and footer; these must be reapplied line-wise,
# within the page structure.
html_highlighted = html_highlighted.replace('<div class="highlight"><pre>', '')
html_highlighted = html_highlighted.replace('</pre></div>', '')
for (linenum, html_line) in enumerate(html_highlighted.split('\n'), start=1):
print(u'''
<tr>
<td><a name=\"L{linenum}\">{linenum}</a></td>
<td></td>
<td></td>
<td><div class="highlight"><pre>{html_line}</pre></div></td>
</tr>'''.format(**locals()), file=self.stream)
for remark in line_remarks.get(linenum, []):
if not suppress(remark):
self.render_inline_remarks(remark, html_line)
def render_inline_remarks(self, r, line):
inlining_context = r.DemangledFunctionName
dl = context.caller_loc.get(r.Function)
if dl:
dl_dict = dict(list(dl))
link = optrecord.make_link(dl_dict['File'], dl_dict['Line'] - 2)
inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(**locals())
# Column is the number of characters *including* tabs, keep those and
# replace everything else with spaces.
indent = line[:max(r.Column, 1) - 1]
indent = re.sub(r'\S', ' ', indent)
# Create expanded message and link if we have a multiline message.
lines = r.message.split('\n')
if len(lines) > 1:
expand_link = '<a style="text-decoration: none;" href="" onclick="toggleExpandedMessage(this); return false;">+</a>'
message = lines[0]
expand_message = u'''
<div class="full-info" style="display:none;">
<div class="col-left"><pre style="display:inline">{}</pre></div>
<div class="expanded col-left"><pre>{}</pre></div>
</div>'''.format(indent, '\n'.join(lines[1:]))
else:
expand_link = ''
expand_message = ''
message = r.message
print(u'''
<tr>
<td></td>
<td>{r.RelativeHotness}</td>
<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
<td><pre style="display:inline">{indent}</pre><span class=\"column-entry-yellow\">{expand_link} {message} </span>{expand_message}</td>
<td class=\"column-entry-yellow\">{inlining_context}</td>
</tr>'''.format(**locals()), file=self.stream)
def render(self, line_remarks):
if not self.source_stream:
return
print(u'''
<html>
<title>{}</title>
<meta charset="utf-8" />
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
<script type="text/javascript">
/* Simple helper to show/hide the expanded message of a remark. */
function toggleExpandedMessage(e) {{
var FullTextElems = e.parentElement.parentElement.getElementsByClassName("full-info");
if (!FullTextElems || FullTextElems.length < 1) {{
return false;
}}
var FullText = FullTextElems[0];
if (FullText.style.display == 'none') {{
e.innerHTML = '-';
FullText.style.display = 'block';
}} else {{
e.innerHTML = '+';
FullText.style.display = 'none';
}}
}}
</script>
</head>
<body>
<div class="centered">
<table class="source">
<thead>
<tr>
<th style="width: 2%">Line</td>
<th style="width: 3%">Hotness</td>
<th style="width: 10%">Optimization</td>
<th style="width: 70%">Source</td>
<th style="width: 15%">Inline Context</td>
</tr>
</thead>
<tbody>'''.format(os.path.basename(self.filename)), file=self.stream)
self.render_source_lines(self.source_stream, line_remarks)
print(u'''
</tbody>
</table>
</body>
</html>''', file=self.stream)
class IndexRenderer:
def __init__(self, output_dir, should_display_hotness, max_hottest_remarks_on_index):
self.stream = io.open(os.path.join(output_dir, 'index.html'), 'w', encoding='utf-8')
self.should_display_hotness = should_display_hotness
self.max_hottest_remarks_on_index = max_hottest_remarks_on_index
def render_entry(self, r, odd):
escaped_name = html.escape(r.DemangledFunctionName)
print(u'''
<tr>
<td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>
<td class=\"column-entry-{odd}\">{r.RelativeHotness}</td>
<td class=\"column-entry-{odd}\">{escaped_name}</td>
<td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
</tr>'''.format(**locals()), file=self.stream)
def render(self, all_remarks):
print(u'''
<html>
<meta charset="utf-8" />
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
</head>
<body>
<div class="centered">
<table>
<tr>
<td>Source Location</td>
<td>Hotness</td>
<td>Function</td>
<td>Pass</td>
</tr>''', file=self.stream)
max_entries = None
if self.should_display_hotness:
max_entries = self.max_hottest_remarks_on_index
for i, remark in enumerate(all_remarks[:max_entries]):
if not suppress(remark):
self.render_entry(remark, i % 2)
print(u'''
</table>
</body>
</html>''', file=self.stream)
def _render_file(source_dir, output_dir, ctx, no_highlight, entry, filter_):
global context
context = ctx
filename, remarks = entry
SourceFileRenderer(source_dir, output_dir, filename, no_highlight).render(remarks)
def map_remarks(all_remarks):
# Set up a map between function names and their source location for
# functions where inlining happened.
for remark in optrecord.itervalues(all_remarks):
if isinstance(remark, optrecord.Passed) and remark.Pass == "inline" and remark.Name == "Inlined":
for arg in remark.Args:
arg_dict = dict(list(arg))
caller = arg_dict.get('Caller')
if caller:
try:
context.caller_loc[caller] = arg_dict['DebugLoc']
except KeyError:
pass
def generate_report(all_remarks,
file_remarks,
source_dir,
output_dir,
no_highlight,
should_display_hotness,
max_hottest_remarks_on_index,
num_jobs,
should_print_progress):
try:
os.makedirs(output_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(output_dir):
pass
else:
raise
if should_print_progress:
print('Rendering index page...')
if should_display_hotness:
sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.Hotness, r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function), reverse=True)
else:
sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function))
IndexRenderer(output_dir, should_display_hotness, max_hottest_remarks_on_index).render(sorted_remarks)
shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"style.css"), output_dir)
_render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight)
if should_print_progress:
print('Rendering HTML files...')
optpmap.pmap(_render_file_bound,
file_remarks.items(),
num_jobs,
should_print_progress)
def main():
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'yaml_dirs_or_files',
nargs='+',
help='List of optimization record files or directories searched '
'for optimization record files.')
parser.add_argument(
'--output-dir',
'-o',
default='html',
help='Path to a directory where generated HTML files will be output. '
'If the directory does not already exist, it will be created. '
'"%(default)s" by default.')
parser.add_argument(
'--jobs',
'-j',
default=None,
type=int,
help='Max job count (defaults to %(default)s, the current CPU count)')
parser.add_argument(
'--source-dir',
'-s',
default='',
help='set source directory')
parser.add_argument(
'--no-progress-indicator',
'-n',
action='store_true',
default=False,
help='Do not display any indicator of how many YAML files were read '
'or rendered into HTML.')
parser.add_argument(
'--max-hottest-remarks-on-index',
default=1000,
type=int,
help='Maximum number of the hottest remarks to appear on the index page')
parser.add_argument(
'--no-highlight',
action='store_true',
default=False,
help='Do not use a syntax highlighter when rendering the source code')
parser.add_argument(
'--demangler',
help='Set the demangler to be used (defaults to %s)' % optrecord.Remark.default_demangler)
parser.add_argument(
'--filter',
default='',
help='Only display remarks from passes matching filter expression')
# Do not make this a global variable. Values need to be propagated through
# to individual classes and functions to be portable with multiprocessing
# across Windows and non-Windows platforms.
args = parser.parse_args()
print_progress = not args.no_progress_indicator
if args.demangler:
optrecord.Remark.set_demangler(args.demangler)
files = optrecord.find_opt_files(*args.yaml_dirs_or_files)
if not files:
parser.error("No *.opt.yaml files found")
sys.exit(1)
all_remarks, file_remarks, should_display_hotness = \
optrecord.gather_results(files, args.jobs, print_progress, args.filter)
map_remarks(all_remarks)
generate_report(all_remarks,
file_remarks,
args.source_dir,
args.output_dir,
args.no_highlight,
should_display_hotness,
args.max_hottest_remarks_on_index,
args.jobs,
print_progress)
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/tools/opt-viewer/opt-viewer.py |
#!/usr/bin/env python
from __future__ import print_function
import io
import yaml
# Try to use the C parser.
try:
from yaml import CLoader as Loader
except ImportError:
print("For faster parsing, you may want to install libYAML for PyYAML")
from yaml import Loader
import html
from collections import defaultdict
import fnmatch
import functools
from multiprocessing import Lock
import os, os.path
import subprocess
try:
# The previously builtin function `intern()` was moved
# to the `sys` module in Python 3.
from sys import intern
except:
pass
import re
import optpmap
try:
dict.iteritems
except AttributeError:
# Python 3
def itervalues(d):
return iter(d.values())
def iteritems(d):
return iter(d.items())
else:
# Python 2
def itervalues(d):
return d.itervalues()
def iteritems(d):
return d.iteritems()
def html_file_name(filename):
return filename.replace('/', '_').replace('#', '_') + ".html"
def make_link(File, Line):
return "\"{}#L{}\"".format(html_file_name(File), Line)
class Remark(yaml.YAMLObject):
# Work-around for http://pyyaml.org/ticket/154.
yaml_loader = Loader
default_demangler = 'c++filt -n'
demangler_proc = None
@classmethod
def set_demangler(cls, demangler):
cls.demangler_proc = subprocess.Popen(demangler.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cls.demangler_lock = Lock()
@classmethod
def demangle(cls, name):
with cls.demangler_lock:
cls.demangler_proc.stdin.write((name + '\n').encode('utf-8'))
cls.demangler_proc.stdin.flush()
return cls.demangler_proc.stdout.readline().rstrip().decode('utf-8')
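# Illustrative usage (a sketch; the mangled name and the c++filt output are
# assumptions, not taken from the original source):
#
#   >>> Remark.set_demangler(Remark.default_demangler)
#   >>> Remark.demangle('_Z3foov')
#   'foo()'
#
# A single long-lived demangler process is shared under a lock rather than
# spawning one subprocess per symbol, which matters with many remarks.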
# Intern all strings since we have a lot of duplication across filenames
# and remark text.
#
# Change Args from a list of dicts to a tuple of tuples. This saves
# memory in two ways. One, a small tuple is significantly smaller than a
# small dict. Two, using tuple instead of list allows Args to be directly
# used as part of the key (in Python only immutable types are hashable).
def _reduce_memory(self):
self.Pass = intern(self.Pass)
self.Name = intern(self.Name)
try:
# Can't intern unicode strings.
self.Function = intern(self.Function)
except:
pass
def _reduce_memory_dict(old_dict):
new_dict = dict()
for (k, v) in iteritems(old_dict):
if type(k) is str:
k = intern(k)
if type(v) is str:
v = intern(v)
elif type(v) is dict:
# This handles [{'Caller': ..., 'DebugLoc': { 'File': ... }}]
v = _reduce_memory_dict(v)
new_dict[k] = v
return tuple(new_dict.items())
self.Args = tuple([_reduce_memory_dict(arg_dict) for arg_dict in self.Args])
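# Illustrative shape change performed by _reduce_memory (a sketch with
# made-up values, mirroring the [{'Caller': ..., 'DebugLoc': ...}] case
# handled above):
#
#   before: [{'Callee': '_Z3foov',
#             'DebugLoc': {'File': 'a.c', 'Line': 3, 'Column': 7}}]
#   after:  ((('Callee', '_Z3foov'),
#             ('DebugLoc', (('File', 'a.c'), ('Line', 3), ('Column', 7)))),)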
# The inverse operation of the dictionary-related memory optimization in
# _reduce_memory_dict. E.g.
# (('DebugLoc', (('File', ...) ... ))) -> [{'DebugLoc': {'File': ...} ....}]
def recover_yaml_structure(self):
def tuple_to_dict(t):
d = dict()
for (k, v) in t:
if type(v) is tuple:
v = tuple_to_dict(v)
d[k] = v
return d
self.Args = [tuple_to_dict(arg_tuple) for arg_tuple in self.Args]
def canonicalize(self):
if not hasattr(self, 'Hotness'):
self.Hotness = 0
if not hasattr(self, 'Args'):
self.Args = []
self._reduce_memory()
@property
def File(self):
return self.DebugLoc['File']
@property
def Line(self):
return int(self.DebugLoc['Line'])
@property
def Column(self):
return self.DebugLoc['Column']
@property
def DebugLocString(self):
return "{}:{}:{}".format(self.File, self.Line, self.Column)
@property
def DemangledFunctionName(self):
return self.demangle(self.Function)
@property
def Link(self):
return make_link(self.File, self.Line)
def getArgString(self, mapping):
mapping = dict(list(mapping))
dl = mapping.get('DebugLoc')
if dl:
del mapping['DebugLoc']
assert(len(mapping) == 1)
(key, value) = list(mapping.items())[0]
if key == 'Caller' or key == 'Callee' or key == 'DirectCallee':
value = html.escape(self.demangle(value))
if dl and key != 'Caller':
dl_dict = dict(list(dl))
return u"<a href={}>{}</a>".format(
make_link(dl_dict['File'], dl_dict['Line']), value)
else:
return value
# Return a cached dictionary for the arguments. The key for each entry is
# the argument key (e.g. 'Callee' for inlining remarks). The value is a
# tuple containing the value (e.g. for 'Callee' the function) and
# optionally a DebugLoc.
def getArgDict(self):
if hasattr(self, 'ArgDict'):
return self.ArgDict
self.ArgDict = {}
for arg in self.Args:
if len(arg) == 2:
if arg[0][0] == 'DebugLoc':
dbgidx = 0
else:
assert(arg[1][0] == 'DebugLoc')
dbgidx = 1
key = arg[1 - dbgidx][0]
entry = (arg[1 - dbgidx][1], arg[dbgidx][1])
else:
arg = arg[0]
key = arg[0]
entry = (arg[1], )
self.ArgDict[key] = entry
return self.ArgDict
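# Illustrative result (a sketch with made-up values): for an inlining remark
# whose 'Callee' argument carries a DebugLoc, getArgDict() yields
#
#   {'Callee': ('_Z3foov', (('File', 'a.c'), ('Line', 3), ('Column', 7)))}
#
# while an argument without a DebugLoc maps to a one-element tuple, e.g.
# {'String': (' inlined into ',)}.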
def getDiffPrefix(self):
if hasattr(self, 'Added'):
if self.Added:
return '+'
else:
return '-'
return ''
@property
def PassWithDiffPrefix(self):
return self.getDiffPrefix() + self.Pass
@property
def message(self):
# Args is a list of mappings (dictionaries)
values = [self.getArgString(mapping) for mapping in self.Args]
return "".join(values)
@property
def RelativeHotness(self):
if self.max_hotness:
return "{0:.2f}%".format(self.Hotness * 100. / self.max_hotness)
else:
return ''
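# Worked example (a sketch): with self.Hotness == 300 and self.max_hotness ==
# 1200, RelativeHotness is '25.00%'; without profile data max_hotness stays 0
# and the column is rendered empty.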
@property
def key(self):
return (self.__class__, self.PassWithDiffPrefix, self.Name, self.File,
self.Line, self.Column, self.Function, self.Args)
def __hash__(self):
return hash(self.key)
def __eq__(self, other):
return self.key == other.key
def __repr__(self):
return str(self.key)
class Analysis(Remark):
yaml_tag = '!Analysis'
@property
def color(self):
return "white"
class AnalysisFPCommute(Analysis):
yaml_tag = '!AnalysisFPCommute'
class AnalysisAliasing(Analysis):
yaml_tag = '!AnalysisAliasing'
class Passed(Remark):
yaml_tag = '!Passed'
@property
def color(self):
return "green"
class Missed(Remark):
yaml_tag = '!Missed'
@property
def color(self):
return "red"
class Failure(Missed):
yaml_tag = '!Failure'
def get_remarks(input_file, filter_=None):
max_hotness = 0
all_remarks = dict()
file_remarks = defaultdict(functools.partial(defaultdict, list))
with io.open(input_file, encoding = 'utf-8') as f:
docs = yaml.load_all(f, Loader=Loader)
filter_e = None
if filter_:
filter_e = re.compile(filter_)
for remark in docs:
remark.canonicalize()
# Skip remarks without a debug location, and duplicated remarks.
if not hasattr(remark, 'DebugLoc') or remark.key in all_remarks:
continue
if filter_e and not filter_e.search(remark.Pass):
continue
all_remarks[remark.key] = remark
file_remarks[remark.File][remark.Line].append(remark)
# If we're reading back a diff YAML file, max_hotness is already
# captured, which may actually be less than the max hotness found
# in the file.
if hasattr(remark, 'max_hotness'):
max_hotness = remark.max_hotness
max_hotness = max(max_hotness, remark.Hotness)
return max_hotness, all_remarks, file_remarks
def gather_results(filenames, num_jobs, should_print_progress, filter_=None):
if should_print_progress:
print('Reading YAML files...')
if not Remark.demangler_proc:
Remark.set_demangler(Remark.default_demangler)
remarks = optpmap.pmap(
get_remarks, filenames, num_jobs, should_print_progress, filter_)
max_hotness = max(entry[0] for entry in remarks)
def merge_file_remarks(file_remarks_job, all_remarks, merged):
for filename, d in iteritems(file_remarks_job):
for line, remarks in iteritems(d):
for remark in remarks:
# Bring max_hotness into the remarks so that
# RelativeHotness does not depend on an external global.
remark.max_hotness = max_hotness
if remark.key not in all_remarks:
merged[filename][line].append(remark)
all_remarks = dict()
file_remarks = defaultdict(functools.partial(defaultdict, list))
for _, all_remarks_job, file_remarks_job in remarks:
merge_file_remarks(file_remarks_job, all_remarks, file_remarks)
all_remarks.update(all_remarks_job)
return all_remarks, file_remarks, max_hotness != 0
def find_opt_files(*dirs_or_files):
all = []
for dir_or_file in dirs_or_files:
if os.path.isfile(dir_or_file):
all.append(dir_or_file)
else:
for dir, subdirs, files in os.walk(dir_or_file):
# Exclude mounted directories and symlinks (os.walk default).
subdirs[:] = [d for d in subdirs
if not os.path.ismount(os.path.join(dir, d))]
for file in files:
if fnmatch.fnmatch(file, "*.opt.yaml*"):
all.append(os.path.join(dir, file))
return all
| MDL-SDK-master | src/mdl/jit/llvm/dist/tools/opt-viewer/optrecord.py |
import sys
import multiprocessing
_current = None
_total = None
def _init(current, total):
global _current
global _total
_current = current
_total = total
def _wrapped_func(func_and_args):
func, argument, should_print_progress, filter_ = func_and_args
if should_print_progress:
with _current.get_lock():
_current.value += 1
sys.stdout.write('\r\t{} of {}'.format(_current.value, _total.value))
sys.stdout.flush()
return func(argument, filter_)
def pmap(func, iterable, processes, should_print_progress, filter_=None, *args, **kwargs):
"""
A parallel map function that reports on its progress.
Applies `func` to every item of `iterable` and return a list of the
results. If `processes` is greater than one, a process pool is used to run
the functions in parallel. `should_print_progress` is a boolean value that
indicates whether a string 'N of M' should be printed to indicate how many
of the functions have finished being run.
"""
global _current
global _total
_current = multiprocessing.Value('i', 0)
_total = multiprocessing.Value('i', len(iterable))
func_and_args = [(func, arg, should_print_progress, filter_) for arg in iterable]
if processes == 1:
result = list(map(_wrapped_func, func_and_args, *args, **kwargs))
else:
pool = multiprocessing.Pool(initializer=_init,
initargs=(_current, _total,),
processes=processes)
result = pool.map(_wrapped_func, func_and_args, *args, **kwargs)
pool.close()
pool.join()
if should_print_progress:
sys.stdout.write('\r')
return result
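# Illustrative usage (a sketch, not part of the original source; square() and
# its inputs are hypothetical):
#
#   >>> def square(x, filter_=None):
#   ...     return x * x
#   >>> pmap(square, [1, 2, 3], processes=1, should_print_progress=False)
#   [1, 4, 9]
#
# Note that `func` must accept a trailing filter argument, because
# _wrapped_func always forwards `filter_` to it.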
| MDL-SDK-master | src/mdl/jit/llvm/dist/tools/opt-viewer/optpmap.py |
#!/usr/bin/env python
"""A shuffle-select vector fuzz tester.
This is a python program to fuzz test the LLVM shufflevector and select
instructions. It generates a function with a random sequence of shufflevectors,
each optionally followed by a select instruction (regular or zero merge),
maintaining the element mapping accumulated across the function. It then
generates a main function which calls it with a different value in each element
and checks that the result matches the expected mapping.
Take the output IR printed to stdout, compile it to an executable using whatever
set of transforms you want to test, and run the program. If it crashes, it found
a bug (an error message with the expected and actual result is printed).
"""
from __future__ import print_function
import random
import uuid
import argparse
# Probability of one undef index in the generated mask for a shufflevector
# instruction.
SHUF_UNDEF_POS = 0.15
# Probability of one undef index in the generated mask for a select
# instruction.
SEL_UNDEF_POS = 0.15
# Probability of adding a select instruction to the result of a shufflevector.
ADD_SEL_POS = 0.4
# If we are adding a select instruction, this is the probability of a
# merge-select instruction (1 - MERGE_SEL_POS = probability of a
# zero-merge-select instruction).
MERGE_SEL_POS = 0.5
test_template = r'''
define internal fastcc {ty} @test({inputs}) noinline nounwind {{
entry:
{instructions}
ret {ty} {last_name}
}}
'''
error_template = r'''@error.{lane} = private unnamed_addr global [64 x i8] c"FAIL: lane {lane}, expected {exp}, found %d\0A{padding}"'''
main_template = r'''
define i32 @main() {{
entry:
; Create a scratch space to print error messages.
%str = alloca [64 x i8]
%str.ptr = getelementptr inbounds [64 x i8], [64 x i8]* %str, i32 0, i32 0
; Build the input vector and call the test function.
%v = call fastcc {ty} @test({inputs})
br label %test.0
{check_die}
}}
declare i32 @strlen(i8*)
declare i32 @write(i32, i8*, i32)
declare i32 @sprintf(i8*, i8*, ...)
declare void @llvm.trap() noreturn nounwind
'''
check_template = r'''
test.{lane}:
%v.{lane} = extractelement {ty} %v, i32 {lane}
%cmp.{lane} = {i_f}cmp {ordered}ne {scalar_ty} %v.{lane}, {exp}
br i1 %cmp.{lane}, label %die.{lane}, label %test.{n_lane}
'''
undef_check_template = r'''
test.{lane}:
; Skip this lane, its value is undef.
br label %test.{n_lane}
'''
die_template = r'''
die.{lane}:
; Capture the actual value and print an error message.
call i32 (i8*, i8*, ...) @sprintf(i8* %str.ptr, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @error.{lane}, i32 0, i32 0), {scalar_ty} %v.{lane})
%length.{lane} = call i32 @strlen(i8* %str.ptr)
call i32 @write(i32 2, i8* %str.ptr, i32 %length.{lane})
call void @llvm.trap()
unreachable
'''
class Type:
def __init__(self, is_float, elt_width, elt_num):
self.is_float = is_float # Boolean
self.elt_width = elt_width # Integer
self.elt_num = elt_num # Integer
def dump(self):
if self.is_float:
str_elt = 'float' if self.elt_width == 32 else 'double'
else:
str_elt = 'i' + str(self.elt_width)
if self.elt_num == 1:
return str_elt
else:
return '<' + str(self.elt_num) + ' x ' + str_elt + '>'
def get_scalar_type(self):
return Type(self.is_float, self.elt_width, 1)
# Class to represent any value (variable) that can be used.
class Value:
def __init__(self, name, ty, value = None):
self.ty = ty # Type
self.name = name # String
self.value = value # list of integers or floating points
# Class to represent an IR instruction (shuffle/select).
class Instruction(Value):
def __init__(self, name, ty, op0, op1, mask):
Value.__init__(self, name, ty)
self.op0 = op0 # Value
self.op1 = op1 # Value
self.mask = mask # list of integers
def dump(self): pass
def calc_value(self): pass
# Class to represent an IR shuffle instruction
class ShufInstr(Instruction):
shuf_template = ' {name} = shufflevector {ty} {op0}, {ty} {op1}, <{num} x i32> {mask}\n'
def __init__(self, name, ty, op0, op1, mask):
Instruction.__init__(self, '%shuf' + name, ty, op0, op1, mask)
def dump(self):
str_mask = [('i32 ' + str(idx)) if idx != -1 else 'i32 undef' for idx in self.mask]
str_mask = '<' + (', ').join(str_mask) + '>'
return self.shuf_template.format(name = self.name, ty = self.ty.dump(), op0 = self.op0.name,
op1 = self.op1.name, num = self.ty.elt_num, mask = str_mask)
def calc_value(self):
if self.value != None:
print('Trying to calculate the value of a shuffle instruction twice')
exit(1)
result = []
for i in range(len(self.mask)):
index = self.mask[i]
if index < self.ty.elt_num and index >= 0:
result.append(self.op0.value[index])
elif index >= self.ty.elt_num:
index = index % self.ty.elt_num
result.append(self.op1.value[index])
else: # -1 => undef
result.append(-1)
self.value = result
# Class to represent an IR select instruction
class SelectInstr(Instruction):
sel_template = ' {name} = select <{num} x i1> {mask}, {ty} {op0}, {ty} {op1}\n'
def __init__(self, name, ty, op0, op1, mask):
Instruction.__init__(self, '%sel' + name, ty, op0, op1, mask)
def dump(self):
str_mask = [('i1 ' + str(idx)) if idx != -1 else 'i1 undef' for idx in self.mask]
str_mask = '<' + (', ').join(str_mask) + '>'
return self.sel_template.format(name = self.name, ty = self.ty.dump(), op0 = self.op0.name,
op1 = self.op1.name, num = self.ty.elt_num, mask = str_mask)
def calc_value(self):
if self.value != None:
print('Trying to calculate the value of a select instruction twice')
exit(1)
result = []
for i in range(len(self.mask)):
index = self.mask[i]
if index == 1:
result.append(self.op0.value[i])
elif index == 0:
result.append(self.op1.value[i])
else: # -1 => undef
result.append(-1)
self.value = result
# Returns a list of Values initialized with actual numbers according to the
# provided type
def gen_inputs(ty, num):
inputs = []
for i in range(num):
inp = []
for j in range(ty.elt_num):
if ty.is_float:
inp.append(float(i*ty.elt_num + j))
else:
inp.append((i*ty.elt_num + j) % (1 << ty.elt_width))
inputs.append(Value('%inp' + str(i), ty, inp))
return inputs
# Returns a random vector type to be tested.
# In case one of the dimensions (scalar type/number of elements) is provided,
# fill in the blank dimension and return an appropriate Type object.
def get_random_type(ty, num_elts):
if ty != None:
if ty == 'i8':
is_float = False
width = 8
elif ty == 'i16':
is_float = False
width = 16
elif ty == 'i32':
is_float = False
width = 32
elif ty == 'i64':
is_float = False
width = 64
elif ty == 'f32':
is_float = True
width = 32
elif ty == 'f64':
is_float = True
width = 64
int_elt_widths = [8, 16, 32, 64]
float_elt_widths = [32, 64]
if num_elts == None:
num_elts = random.choice(range(2, 65))
if ty == None:
# 1 for integer type, 0 for floating-point
if random.randint(0,1):
is_float = False
width = random.choice(int_elt_widths)
else:
is_float = True
width = random.choice(float_elt_widths)
return Type(is_float, width, num_elts)
# Generate mask for shufflevector IR instruction, with SHUF_UNDEF_POS
# probability of one undef index.
def gen_shuf_mask(ty):
mask = []
for i in range(ty.elt_num):
if SHUF_UNDEF_POS/ty.elt_num > random.random():
mask.append(-1)
else:
mask.append(random.randint(0, ty.elt_num*2 - 1))
return mask
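# Illustrative outcome (a sketch with made-up random draws): for a <4 x i32>
# type (elt_num == 4) this may return e.g. [0, 5, -1, 2], where indices 0-3
# select lanes of op0, 4-7 select lanes of op1, and -1 prints as 'undef'.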
# Generate mask for select IR instruction, with SEL_UNDEF_POS probability
# of one undef index.
def gen_sel_mask(ty):
mask = []
for i in range(ty.elt_num):
if SEL_UNDEF_POS/ty.elt_num > random.random():
mask.append(-1)
else:
mask.append(random.randint(0, 1))
return mask
# Generate shuffle instructions with optional select instruction after.
def gen_insts(inputs, ty):
int_zero_init = Value('zeroinitializer', ty, [0]*ty.elt_num)
float_zero_init = Value('zeroinitializer', ty, [0.0]*ty.elt_num)
insts = []
name_idx = 0
while len(inputs) > 1:
# Choose 2 available Values - remove them from inputs list.
[idx0, idx1] = sorted(random.sample(range(len(inputs)), 2))
op0 = inputs[idx0]
op1 = inputs[idx1]
# Create the shuffle instruction.
shuf_mask = gen_shuf_mask(ty)
shuf_inst = ShufInstr(str(name_idx), ty, op0, op1, shuf_mask)
shuf_inst.calc_value()
# Add the new shuffle instruction to the list of instructions.
insts.append(shuf_inst)
# Optionally, add select instruction with the result of the previous shuffle.
if random.random() < ADD_SEL_POS:
# Either blending with a random Value or with an all-zero vector.
if random.random() < MERGE_SEL_POS:
op2 = random.choice(inputs)
else:
op2 = float_zero_init if ty.is_float else int_zero_init
select_mask = gen_sel_mask(ty)
select_inst = SelectInstr(str(name_idx), ty, shuf_inst, op2, select_mask)
select_inst.calc_value()
# Add the select instructions to the list of instructions and to the available Values.
insts.append(select_inst)
inputs.append(select_inst)
else:
# If the shuffle instruction is not followed by select, add it to the available Values.
inputs.append(shuf_inst)
del inputs[idx1]
del inputs[idx0]
name_idx += 1
return insts
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--seed', default=str(uuid.uuid4()),
help='A string used to seed the RNG')
parser.add_argument('--max-num-inputs', type=int, default=20,
help='Specify the maximum number of vector inputs for the test. (default: 20)')
parser.add_argument('--min-num-inputs', type=int, default=10,
help='Specify the minimum number of vector inputs for the test. (default: 10)')
parser.add_argument('--type', default=None,
help='''
Choose specific type to be tested.
i8, i16, i32, i64, f32 or f64.
(default: random)''')
parser.add_argument('--num-elts', default=None, type=int,
help='Choose specific number of vector elements to be tested. (default: random)')
args = parser.parse_args()
print('; The seed used for this test is ' + args.seed)
assert args.min_num_inputs < args.max_num_inputs, "Minimum value greater than maximum."
assert args.type in [None, 'i8', 'i16', 'i32', 'i64', 'f32', 'f64'], "Illegal type."
assert args.num_elts == None or args.num_elts > 0, "num_elts must be a positive integer."
random.seed(args.seed)
ty = get_random_type(args.type, args.num_elts)
inputs = gen_inputs(ty, random.randint(args.min_num_inputs, args.max_num_inputs))
inputs_str = (', ').join([inp.ty.dump() + ' ' + inp.name for inp in inputs])
inputs_values = [inp.value for inp in inputs]
insts = gen_insts(inputs, ty)
assert len(inputs) == 1, "Only one value should be left after generating phase"
res = inputs[0]
# print the actual test function by dumping the generated instructions.
insts_str = ''.join([inst.dump() for inst in insts])
print(test_template.format(ty = ty.dump(), inputs = inputs_str,
instructions = insts_str, last_name = res.name))
# Print the error message templates as global strings
for i in range(len(res.value)):
pad = ''.join(['\\00']*(31 - len(str(i)) - len(str(res.value[i]))))
print(error_template.format(lane = str(i), exp = str(res.value[i]),
padding = pad))
# Prepare the runtime checks and failure handlers.
scalar_ty = ty.get_scalar_type()
check_die = ''
i_f = 'f' if ty.is_float else 'i'
ordered = 'o' if ty.is_float else ''
for i in range(len(res.value)):
if res.value[i] != -1:
# Emit runtime check for each non-undef expected value.
check_die += check_template.format(lane = str(i), n_lane = str(i+1),
ty = ty.dump(), i_f = i_f, scalar_ty = scalar_ty.dump(),
exp = str(res.value[i]), ordered = ordered)
# Emit failure handler for each runtime check with proper error message
check_die += die_template.format(lane = str(i), scalar_ty = scalar_ty.dump())
else:
# Ignore lanes with undef result
check_die += undef_check_template.format(lane = str(i), n_lane = str(i+1))
check_die += '\ntest.' + str(len(res.value)) + ':\n'
check_die += ' ret i32 0'
# Prepare the input values passed to the test function.
inputs_values = [', '.join([scalar_ty.dump() + ' ' + str(i) for i in inp]) for inp in inputs_values]
inputs = ', '.join([ty.dump() + ' <' + inp + '>' for inp in inputs_values])
print(main_template.format(ty = ty.dump(), inputs = inputs, check_die = check_die))
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/shuffle_select_fuzz_tester.py |
#!/usr/bin/env python
#changelog:
#10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _; this will then remove
#nodes such as %tmp.1.i and %tmp._i.3
#10/13/2005: extended to remove variables of the form %tmp(.#)* rather than just
#%tmp.#, i.e. it now will remove %tmp.12.3.15 etc.; additionally fixed a spelling error in
#the comments
#10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
#than removing all lines for which the label CONTAINS %tmp.#
from __future__ import print_function
import re
import sys
if( len(sys.argv) < 3 ):
print('usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>')
sys.exit(1)
#get a file object
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
#we'll get this one line at a time... while we could just put the whole thing
#in a string, that would kill old computers
buffer = input.readline()
while buffer != '':
if re.compile("label(\s*)=(\s*)\"\s%tmp(.\w*)*(\s*)\"").search(buffer):
#skip next line, write neither this line nor the next
buffer = input.readline()
else:
#this isn't a tmp Node, we can write it
output.write(buffer)
#prepare for the next iteration
buffer = input.readline()
input.close()
output.close()
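#illustrative effect (a sketch; the dot line below is made up): a line such as
#  Node0x8a2d [shape=record,label=" %tmp.12.3 "];
#matches the pattern above, so neither it nor the line following it is copied
#to the output; every other line passes through unchanged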
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/DSAclean.py |
#!/usr/bin/env python
'''A utility to update LLVM IR CHECK lines in C/C++ FileCheck test files.
Example RUN lines in .c/.cc test files:
// RUN: %clang -emit-llvm -S %s -o - -O2 | FileCheck %s
// RUN: %clangxx -emit-llvm -S %s -o - -O2 | FileCheck -check-prefix=CHECK-A %s
Usage:
% utils/update_cc_test_checks.py --llvm-bin=release/bin test/a.cc
% utils/update_cc_test_checks.py --clang=release/bin/clang /tmp/c/a.cc
'''
from __future__ import print_function
import argparse
import collections
import distutils.spawn
import json
import os
import re
import shlex
import subprocess
import sys
import tempfile
from UpdateTestChecks import asm, common
SUBST = {
'%clang': [],
'%clang_cc1': ['-cc1'],
'%clangxx': ['--driver-mode=g++'],
}
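# Illustrative substitution (a sketch with a made-up RUN line): given
#
#   // RUN: %clang_cc1 -triple x86_64 -emit-llvm %s -o - | FileCheck %s
#
# main() replaces the leading '%clang_cc1' with SUBST['%clang_cc1'] (i.e.
# ['-cc1'], later extended with builtin-include flags in config()), swaps
# '%s' for the test file path, and appends any --clang-args.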
def get_line2spell_and_mangled(args, clang_args):
ret = {}
# Use clang's JSON AST dump to get the mangled name
json_dump_args = [args.clang] + clang_args + ['-fsyntax-only', '-o', '-']
if '-cc1' not in json_dump_args:
# For tests that invoke %clang instead of %clang_cc1 we have to use
# -Xclang -ast-dump=json instead:
json_dump_args.append('-Xclang')
json_dump_args.append('-ast-dump=json')
common.debug('Running', ' '.join(json_dump_args))
popen = subprocess.Popen(json_dump_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = popen.communicate()
if popen.returncode != 0:
sys.stderr.write('Failed to run ' + ' '.join(json_dump_args) + '\n')
sys.stderr.write(stderr)
sys.stderr.write(stdout)
sys.exit(2)
# Parse the clang JSON and add all children of type FunctionDecl.
# TODO: Should we add checks for global variables being emitted?
def parse_clang_ast_json(node):
node_kind = node['kind']
# Recurse for the following nodes that can contain nested function decls:
if node_kind in ('NamespaceDecl', 'LinkageSpecDecl', 'TranslationUnitDecl',
'CXXRecordDecl'):
if 'inner' in node:
for inner in node['inner']:
parse_clang_ast_json(inner)
# Otherwise we ignore everything except functions:
if node_kind not in ('FunctionDecl', 'CXXMethodDecl', 'CXXConstructorDecl',
'CXXDestructorDecl', 'CXXConversionDecl'):
return
if node.get('isImplicit') is True and node.get('storageClass') == 'extern':
common.debug('Skipping builtin function:', node['name'], '@', node['loc'])
return
common.debug('Found function:', node['kind'], node['name'], '@', node['loc'])
line = node['loc'].get('line')
# If there is no line it is probably a builtin function -> skip
if line is None:
common.debug('Skipping function without line number:', node['name'], '@', node['loc'])
return
# If there is no 'inner' object, it is a function declaration and we can
# skip it. However, function declarations may also contain an 'inner' list,
# but in that case it will only contain ParmVarDecls. If we find an entry
# that is not a ParmVarDecl, we know that this is a function definition.
has_body = False
if 'inner' in node:
for i in node['inner']:
if i.get('kind', 'ParmVarDecl') != 'ParmVarDecl':
has_body = True
break
if not has_body:
common.debug('Skipping function without body:', node['name'], '@', node['loc'])
return
spell = node['name']
mangled = node.get('mangledName', spell)
ret[int(line)-1] = (spell, mangled)
ast = json.loads(stdout)
if ast['kind'] != 'TranslationUnitDecl':
common.error('Clang AST dump JSON format changed?')
sys.exit(2)
parse_clang_ast_json(ast)
for line, func_name in sorted(ret.items()):
common.debug('line {}: found function {}'.format(line+1, func_name), file=sys.stderr)
if not ret:
common.warn('Did not find any functions using', ' '.join(json_dump_args))
return ret
def str_to_commandline(value):
if not value:
return []
return shlex.split(value)
def infer_dependent_args(args):
if not args.clang:
if not args.llvm_bin:
args.clang = 'clang'
else:
args.clang = os.path.join(args.llvm_bin, 'clang')
if not args.opt:
if not args.llvm_bin:
args.opt = 'opt'
else:
args.opt = os.path.join(args.llvm_bin, 'opt')
def config():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--llvm-bin', help='llvm $prefix/bin path')
parser.add_argument('--clang',
help='"clang" executable, defaults to $llvm_bin/clang')
parser.add_argument('--clang-args', default=[], type=str_to_commandline,
help='Space-separated extra args to clang, e.g. --clang-args=-v')
parser.add_argument('--opt',
help='"opt" executable, defaults to $llvm_bin/opt')
parser.add_argument(
'--functions', nargs='+', help='A list of function name regexes. '
'If specified, update CHECK lines for functions matching at least one regex')
parser.add_argument(
'--x86_extra_scrub', action='store_true',
help='Use more regex for x86 matching to reduce diffs between various subtargets')
parser.add_argument('--function-signature', action='store_true',
help='Keep function signature information around for the check line')
parser.add_argument('--check-attributes', action='store_true',
help='Check "Function Attributes" for functions')
parser.add_argument('tests', nargs='+')
args = common.parse_commandline_args(parser)
infer_dependent_args(args)
if not distutils.spawn.find_executable(args.clang):
print('Please specify --llvm-bin or --clang', file=sys.stderr)
sys.exit(1)
# Determine the builtin includes directory so that we can update tests that
# depend on the builtin headers. See get_clang_builtin_include_dir() and
# use_clang() in llvm/utils/lit/lit/llvm/config.py.
try:
builtin_include_dir = subprocess.check_output(
[args.clang, '-print-file-name=include']).decode().strip()
SUBST['%clang_cc1'] = ['-cc1', '-internal-isystem', builtin_include_dir,
'-nostdsysteminc']
except subprocess.CalledProcessError:
common.warn('Could not determine clang builtins directory, some tests '
'might not update correctly.')
if not distutils.spawn.find_executable(args.opt):
# Many uses of this tool will not need an opt binary, because it's only
# needed for updating a test that runs clang | opt | FileCheck. So we
# defer this error message until we find that opt is actually needed.
args.opt = None
return args, parser
def get_function_body(builder, args, filename, clang_args, extra_commands,
prefixes):
# TODO Clean up duplication of asm/common build_function_body_dictionary
# Invoke external tool and extract function bodies.
raw_tool_output = common.invoke_tool(args.clang, clang_args, filename)
for extra_command in extra_commands:
extra_args = shlex.split(extra_command)
with tempfile.NamedTemporaryFile() as f:
f.write(raw_tool_output.encode())
f.flush()
if extra_args[0] == 'opt':
if args.opt is None:
print(filename, 'needs to run opt. '
'Please specify --llvm-bin or --opt', file=sys.stderr)
sys.exit(1)
extra_args[0] = args.opt
raw_tool_output = common.invoke_tool(extra_args[0],
extra_args[1:], f.name)
if '-emit-llvm' in clang_args:
builder.process_run_line(
common.OPT_FUNCTION_RE, common.scrub_body, raw_tool_output,
prefixes)
else:
print('The clang command line should include -emit-llvm as asm tests '
'are discouraged in Clang testsuite.', file=sys.stderr)
sys.exit(1)
def main():
initial_args, parser = config()
script_name = os.path.basename(__file__)
for ti in common.itertests(initial_args.tests, parser, 'utils/' + script_name,
comment_prefix='//', argparse_callback=infer_dependent_args):
# Build a list of clang command lines and check prefixes from RUN lines.
run_list = []
line2spell_and_mangled_list = collections.defaultdict(list)
for l in ti.run_lines:
commands = [cmd.strip() for cmd in l.split('|')]
triple_in_cmd = None
m = common.TRIPLE_ARG_RE.search(commands[0])
if m:
triple_in_cmd = m.groups()[0]
# Apply %clang substitution rule, replace %s by `filename`, and append args.clang_args
clang_args = shlex.split(commands[0])
if clang_args[0] not in SUBST:
print('WARNING: Skipping non-clang RUN line: ' + l, file=sys.stderr)
continue
clang_args[0:1] = SUBST[clang_args[0]]
clang_args = [ti.path if i == '%s' else i for i in clang_args] + ti.args.clang_args
# Permit piping the output through opt
if not (len(commands) == 2 or
(len(commands) == 3 and commands[1].startswith('opt'))):
print('WARNING: Skipping non-clang RUN line: ' + l, file=sys.stderr)
# Extract -check-prefix in FileCheck args
filecheck_cmd = commands[-1]
common.verify_filecheck_prefixes(filecheck_cmd)
if not filecheck_cmd.startswith('FileCheck '):
print('WARNING: Skipping non-FileChecked RUN line: ' + l, file=sys.stderr)
continue
check_prefixes = [item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
run_list.append((check_prefixes, clang_args, commands[1:-1], triple_in_cmd))
# Execute clang, generate LLVM IR, and extract functions.
builder = common.FunctionTestBuilder(
run_list=run_list,
flags=ti.args,
scrubber_args=[])
for prefixes, clang_args, extra_commands, triple_in_cmd in run_list:
common.debug('Extracted clang cmd: clang {}'.format(clang_args))
common.debug('Extracted FileCheck prefixes: {}'.format(prefixes))
get_function_body(builder, ti.args, ti.path, clang_args, extra_commands,
prefixes)
# Invoke clang -Xclang -ast-dump=json to get mapping from start lines to
# mangled names. Forward all clang args for now.
for k, v in get_line2spell_and_mangled(ti.args, clang_args).items():
line2spell_and_mangled_list[k].append(v)
func_dict = builder.finish_and_get_func_dict()
global_vars_seen_dict = {}
prefix_set = set([prefix for p in run_list for prefix in p[0]])
output_lines = []
include_generated_funcs = common.find_arg_in_test(ti,
lambda args: ti.args.include_generated_funcs,
'--include-generated-funcs',
True)
if include_generated_funcs:
# Generate the appropriate checks for each function. We need to emit
# these in the order according to the generated output so that CHECK-LABEL
# works properly. func_order provides that.
# It turns out that when clang generates functions (for example, with
# -fopenmp), it can sometimes cause functions to be re-ordered in the
# output, even functions that exist in the source file. Therefore we
# can't insert check lines before each source function and instead have to
# put them at the end. So the first thing to do is dump out the source
# lines.
common.dump_input_lines(output_lines, ti, prefix_set, '//')
# Now generate all the checks.
def check_generator(my_output_lines, prefixes, func):
if '-emit-llvm' in clang_args:
common.add_ir_checks(my_output_lines, '//',
prefixes,
func_dict, func, False,
ti.args.function_signature,
global_vars_seen_dict)
else:
asm.add_asm_checks(my_output_lines, '//',
prefixes,
func_dict, func)
common.add_checks_at_end(output_lines, run_list, builder.func_order(),
'//', lambda my_output_lines, prefixes, func:
check_generator(my_output_lines,
prefixes, func))
else:
# Normal mode. Put checks before each source function.
for line_info in ti.iterlines(output_lines):
idx = line_info.line_number
line = line_info.line
args = line_info.args
include_line = True
m = common.CHECK_RE.match(line)
if m and m.group(1) in prefix_set:
continue # Don't append the existing CHECK lines
if idx in line2spell_and_mangled_list:
added = set()
for spell, mangled in line2spell_and_mangled_list[idx]:
# One line may contain multiple function declarations.
# Skip if the mangled name has been added before.
# The line number may come from an included file,
# we simply require the spelling name to appear on the line
# to exclude functions from other files.
if mangled in added or spell not in line:
continue
if args.functions is None or any(re.search(regex, spell) for regex in args.functions):
last_line = output_lines[-1].strip()
while last_line == '//':
# Remove the comment line since we will generate a new comment
# line as part of common.add_ir_checks()
output_lines.pop()
last_line = output_lines[-1].strip()
if added:
output_lines.append('//')
added.add(mangled)
common.add_ir_checks(output_lines, '//', run_list, func_dict, mangled,
False, args.function_signature, global_vars_seen_dict)
if line.rstrip('\n') == '//':
include_line = False
if include_line:
output_lines.append(line.rstrip('\n'))
common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))
with open(ti.path, 'wb') as f:
f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
return 0
if __name__ == '__main__':
sys.exit(main())
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/update_cc_test_checks.py |
#!/usr/bin/env python3
import re, sys
def fix_string(s):
TYPE = re.compile(r'\s*(i[0-9]+|float|double|x86_fp80|fp128|ppc_fp128|\[\[.*?\]\]|\[2 x \[\[[A-Z_0-9]+\]\]\]|<.*?>|{.*?}|\[[0-9]+ x .*?\]|%["a-z:A-Z0-9._]+({{.*?}})?|%{{.*?}}|{{.*?}}|\[\[.*?\]\])(\s*(\*|addrspace\(.*?\)|dereferenceable\(.*?\)|byval\(.*?\)|sret|zeroext|inreg|returned|signext|nocapture|align \d+|swiftself|swifterror|readonly|noalias|inalloca|nocapture))*\s*')
counter = 0
if 'i32{{.*}}' in s:
counter = 1
at_pos = s.find('@')
if at_pos == -1:
at_pos = 0
annoying_pos = s.find('{{[^(]+}}')
if annoying_pos != -1:
at_pos = annoying_pos + 9
paren_pos = s.find('(', at_pos)
if paren_pos == -1:
return s
res = s[:paren_pos+1]
s = s[paren_pos+1:]
m = TYPE.match(s)
while m:
res += m.group()
s = s[m.end():]
if s.startswith(',') or s.startswith(')'):
res += f' %{counter}'
counter += 1
next_arg = s.find(',')
if next_arg == -1:
break
res += s[:next_arg+1]
s = s[next_arg+1:]
m = TYPE.match(s)
return res+s
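# Illustrative rewrite (a sketch with a made-up CHECK line):
#
#   before: // CHECK: define i32 @add(i32, i32)
#   after:  // CHECK: define i32 @add(i32 %0, i32 %1)
#
# Each unnamed argument receives a sequential %N name; the counter starts at
# 1 instead of 0 when the line contains 'i32{{.*}}'.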
def process_file(contents):
PREFIX = re.compile(r'check-prefix(es)?(=|\s+)([a-zA-Z0-9,]+)')
check_prefixes = ['CHECK']
result = ''
for line in contents.split('\n'):
if 'FileCheck' in line:
m = PREFIX.search(line)
if m:
check_prefixes.extend(m.group(3).split(','))
found_check = False
for prefix in check_prefixes:
if prefix in line:
found_check = True
break
if not found_check or 'define' not in line:
result += line + '\n'
continue
# We have a check for a function definition. Number the args.
line = fix_string(line)
result += line + '\n'
return result
def main():
print(f'Processing {sys.argv[1]}')
f = open(sys.argv[1])
content = f.read()
f.close()
content = process_file(content)
f = open(sys.argv[1], 'w')
f.write(content)
f.close()
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/add_argument_names.py |
#!/usr/bin/env python
"""
wciia - Whose Code Is It Anyway
Determines code owner of the file/folder relative to the llvm source root.
Code owner is determined from the content of the CODE_OWNERS.TXT
by parsing the D: field
usage:
utils/wciia.py path
limitations:
- must be run from llvm source root
- very simplistic algorithm
- only handles * as a wildcard
- not very user friendly
- does not handle the proposed F: field
"""
from __future__ import print_function
import os
code_owners = {}
def process_files_and_folders(owner):
filesfolders = owner['filesfolders']
# paths must be in ( ... ) so strip them
lpar = filesfolders.find('(')
rpar = filesfolders.rfind(')')
if rpar <= lpar:
# give up
return
paths = filesfolders[lpar+1:rpar]
# split paths
owner['paths'] = []
for path in paths.split():
owner['paths'].append(path)
def process_code_owner(owner):
if 'filesfolders' in owner:
filesfolders = owner['filesfolders']
else:
# print "F: field missing, using D: field"
owner['filesfolders'] = owner['description']
process_files_and_folders(owner)
code_owners[owner['name']] = owner
# process CODE_OWNERS.TXT first
code_owners_file = open("CODE_OWNERS.TXT", "r").readlines()
code_owner = {}
for line in code_owners_file:
for word in line.split():
if word == "N:":
name = line[2:].strip()
if code_owner:
process_code_owner(code_owner)
code_owner = {}
# reset the values
code_owner['name'] = name
if word == "E:":
email = line[2:].strip()
code_owner['email'] = email
if word == "D:":
description = line[2:].strip()
code_owner['description'] = description
if word == "F:":
filesfolders = line[2:].strip()
code_owner['filesfolders'].append(filesfolders)
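# Illustrative CODE_OWNERS.TXT entry (a sketch; the person and path are made
# up). When the F: field is absent, the parenthesized paths are taken from
# the D: field:
#
#   N: Jane Doe
#   E: jane@example.org
#   D: Foo backend (lib/Target/Foo)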
def find_owners(fpath):
onames = []
lmatch = -1
# very simplistic way of finding the best match
for name in code_owners:
owner = code_owners[name]
if 'paths' in owner:
for path in owner['paths']:
# print "searching (" + path + ")"
# try exact match
if fpath == path:
return name
# see if path ends with a *
rstar = path.rfind('*')
if rstar>0:
# try the longest match,
rpos = -1
if len(fpath) < len(path):
rpos = path.find(fpath)
if rpos == 0:
onames.append(name)
onames.append('Chris Lattner')
return onames
# now let's try to find the owner of the file or folder
import sys
if len(sys.argv) < 2:
print("usage " + sys.argv[0] + " file_or_folder")
exit(-1)
# the path we are checking
path = str(sys.argv[1])
# check if this is real path
if not os.path.exists(path):
print("path (" + path + ") does not exist")
exit(-1)
owners_name = find_owners(path)
# be grammatically correct
print("The owner(s) of the (" + path + ") is(are) : " + str(owners_name))
exit(0)
# bottom up walk of the current .
# not yet used
root = "."
for dir,subdirList,fileList in os.walk( root , topdown=False ) :
print("dir :" , dir)
for fname in fileList :
print("-" , fname)
print()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/wciia.py |
#!/usr/bin/env python3
import os
import re
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
def remove_prefix(i, d=0):
if d == 100:
return 2
s = os.popen('llvm-lit -a ' + i).read()
r = re.search('no check strings found with (?:prefix|prefixes) \'([^:]+)', s)
with open(i, 'r+') as f:
s = f.read()
if r:
p = r.group(1)
s = re.sub('=' + p + ',', '=', s)
s = re.sub(',' + p + '([, \n])', '\\1', s)
s = re.sub('\s+-?-check-prefix=' + p + '([ \n])', '\\1', s)
else:
s = re.sub('-?-check-prefixes=([\w-]+)(\Z|[ \t\n])', '--check-prefix=\\1\\2', s)
t = re.search('-?-check-(?:prefix|prefixes)=([^ ]+)\s+-?-check-(?:prefix|prefixes)=([^ ]+)', s)
while t:
s = re.sub(t.group(), '--check-prefixes=' + t.group(1) + ',' + t.group(2), s)
t = re.search('-?-check-(?:prefix|prefixes)=([^ ]+)\s+-?-check-(?:prefix|prefixes)=([^ ]+)', s)
s = re.sub('\s+-?-check-prefix=CHECK[ \t]*\n', '\n', s)
f.truncate(0)
f.seek(0)
f.write(s)
if not r:
t = re.search('Assertions have been autogenerated by (.*)', s)
if t:
s = os.popen('llvm/' + t.group(1) + ' ' + i + ' 2>&1').read()
if 'had conflicting output from different RUN lines for all functions' in s:
return -1
s = os.popen('git diff ' + i).read()
if re.search('\n(?:-+)\n', s) or re.search('\n[+-].*(?<!RUN):', s):
return 1
return 0
return remove_prefix(i, d+1)
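# Illustrative rewrites performed by remove_prefix (a sketch with made-up
# RUN-line fragments):
#
#   --check-prefixes=FOO,BAR  ->  --check-prefixes=BAR   (FOO reported unused)
#   --check-prefix=A --check-prefix=B  ->  --check-prefixes=A,B
#
# A remaining plain --check-prefix=CHECK is dropped entirely, since CHECK is
# FileCheck's default prefix.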
with ThreadPoolExecutor(max_workers=32) as e:
f = []
c = []
a = []
t = { e.submit(remove_prefix, i): i for i in sys.argv[1:] }
for i in as_completed(t):
if i.result() == 0:
print('DONE:', end=' ')
elif i.result() == -1:
print('FAIL:', end=' ')
f.append(t[i])
elif i.result() == 1:
print('CHANGE:', end=' ')
c.append(t[i])
else:
print('ABORT:', end=' ')
a.append(t[i])
print(t[i])
for i in [ (f, 'Failed'), (c, 'Changed'), (a, 'Aborted') ]:
if i[0]:
print('********************\n%s Tests (%d):' % (i[1], len(i[0])))
for j in i[0]:
print(' ' + j)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/update_test_prefix.py |
# Given a path to llvm-objdump and a directory tree, spider the directory tree
# dumping every object file encountered with correct options needed to demangle
# symbols in the object file, and collect statistics about failed / crashed
# demanglings. Useful for stress testing the demangler against a large corpus
# of inputs.
from __future__ import print_function
import argparse
import functools
import os
import re
import sys
import subprocess
import traceback
from multiprocessing import Pool
import multiprocessing
args = None
def parse_line(line):
question = line.find('?')
if question == -1:
return None, None
open_paren = line.find('(', question)
if open_paren == -1:
return None, None
close_paren = line.rfind(')', open_paren)
if close_paren == -1:
return None, None
mangled = line[question : open_paren]
demangled = line[open_paren+1 : close_paren]
return mangled.strip(), demangled.strip()
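# Illustrative parse (a sketch; the symbol line is made up but mimics
# llvm-objdump's demangled output):
#
#   line:    '... ?foo@@YAHXZ (int __cdecl foo(void))'
#   returns: ('?foo@@YAHXZ', 'int __cdecl foo(void)')
#
# Lines without a '?' (no MSVC-mangled name) yield (None, None).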
class Result(object):
def __init__(self):
self.crashed = []
self.file = None
self.nsymbols = 0
self.errors = set()
self.nfiles = 0
class MapContext(object):
def __init__(self):
self.rincomplete = None
self.rcumulative = Result()
self.pending_objs = []
self.npending = 0
def process_file(path, objdump):
r = Result()
r.file = path
popen_args = [objdump, '-t', '-demangle', path]
p = subprocess.Popen(popen_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
r.crashed = [r.file]
return r
output = stdout.decode('utf-8')
for line in output.splitlines():
mangled, demangled = parse_line(line)
if mangled is None:
continue
r.nsymbols += 1
if "invalid mangled name" in demangled:
r.errors.add(mangled)
return r
def add_results(r1, r2):
r1.crashed.extend(r2.crashed)
r1.errors.update(r2.errors)
r1.nsymbols += r2.nsymbols
r1.nfiles += r2.nfiles
def print_result_row(directory, result):
print("[{0} files, {1} crashes, {2} errors, {3} symbols]: '{4}'".format(
result.nfiles, len(result.crashed), len(result.errors), result.nsymbols, directory))
def process_one_chunk(pool, chunk_size, objdump, context):
objs = []
incomplete = False
dir_results = {}
ordered_dirs = []
while context.npending > 0 and len(objs) < chunk_size:
this_dir = context.pending_objs[0][0]
ordered_dirs.append(this_dir)
re = Result()
if context.rincomplete is not None:
re = context.rincomplete
context.rincomplete = None
dir_results[this_dir] = re
re.file = this_dir
nneeded = chunk_size - len(objs)
objs_this_dir = context.pending_objs[0][1]
navail = len(objs_this_dir)
ntaken = min(nneeded, navail)
objs.extend(objs_this_dir[0:ntaken])
remaining_objs_this_dir = objs_this_dir[ntaken:]
context.pending_objs[0] = (context.pending_objs[0][0], remaining_objs_this_dir)
context.npending -= ntaken
if ntaken == navail:
context.pending_objs.pop(0)
else:
incomplete = True
re.nfiles += ntaken
assert(len(objs) == chunk_size or context.npending == 0)
copier = functools.partial(process_file, objdump=objdump)
mapped_results = list(pool.map(copier, objs))
for mr in mapped_results:
result_dir = os.path.dirname(mr.file)
result_entry = dir_results[result_dir]
add_results(result_entry, mr)
# It's only possible that a single item is incomplete, and it has to be the
# last item.
if incomplete:
context.rincomplete = dir_results[ordered_dirs[-1]]
ordered_dirs.pop()
# Now ordered_dirs contains a list of all directories which *did* complete.
for c in ordered_dirs:
re = dir_results[c]
add_results(context.rcumulative, re)
print_result_row(c, re)
def process_pending_files(pool, chunk_size, objdump, context):
while context.npending >= chunk_size:
process_one_chunk(pool, chunk_size, objdump, context)
def go():
global args
obj_dir = args.dir
extensions = args.extensions.split(',')
extensions = [x if x[0] == '.' else '.' + x for x in extensions]
pool_size = 48
pool = Pool(processes=pool_size)
try:
nfiles = 0
context = MapContext()
for root, dirs, files in os.walk(obj_dir):
root = os.path.normpath(root)
pending = []
for f in files:
file, ext = os.path.splitext(f)
if not ext in extensions:
continue
nfiles += 1
full_path = os.path.join(root, f)
full_path = os.path.normpath(full_path)
pending.append(full_path)
# If this directory had no object files, just print a default
# status line and continue with the next dir
if len(pending) == 0:
print_result_row(root, Result())
continue
context.npending += len(pending)
context.pending_objs.append((root, pending))
# Drain the tasks, `pool_size` at a time, until we have less than
# `pool_size` tasks remaining.
process_pending_files(pool, pool_size, args.objdump, context)
        assert(context.npending < pool_size)
process_one_chunk(pool, pool_size, args.objdump, context)
total = context.rcumulative
nfailed = len(total.errors)
nsuccess = total.nsymbols - nfailed
ncrashed = len(total.crashed)
if (nfailed > 0):
print("Failures:")
for m in sorted(total.errors):
print(" " + m)
if (ncrashed > 0):
print("Crashes:")
for f in sorted(total.crashed):
print(" " + f)
print("Summary:")
spct = float(nsuccess)/float(total.nsymbols)
fpct = float(nfailed)/float(total.nsymbols)
cpct = float(ncrashed)/float(nfiles)
print("Processed {0} object files.".format(nfiles))
print("{0}/{1} symbols successfully demangled ({2:.4%})".format(nsuccess, total.nsymbols, spct))
print("{0} symbols could not be demangled ({1:.4%})".format(nfailed, fpct))
print("{0} files crashed while demangling ({1:.4%})".format(ncrashed, cpct))
except:
traceback.print_exc()
pool.close()
pool.join()
if __name__ == "__main__":
def_obj = 'obj' if sys.platform == 'win32' else 'o'
parser = argparse.ArgumentParser(description='Demangle all symbols in a tree of object files, looking for failures.')
parser.add_argument('dir', type=str, help='the root directory at which to start crawling')
parser.add_argument('--objdump', type=str, help='path to llvm-objdump. If not specified ' +
'the tool is located as if by `which llvm-objdump`.')
parser.add_argument('--extensions', type=str, default=def_obj,
help='comma separated list of extensions to demangle (e.g. `o,obj`). ' +
'By default this will be `obj` on Windows and `o` otherwise.')
args = parser.parse_args()
multiprocessing.freeze_support()
go()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/demangle_tree.py |
"""
LLDB Formatters for LLVM data types.
Load into LLDB with 'command script import /path/to/lldbDataFormatters.py'
"""
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('type category define -e llvm -l c++')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVectorImpl<.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVector<.+,.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.ArrayRefSynthProvider '
'-x "^llvm::ArrayRef<.+>$"')
debugger.HandleCommand('type summary add -w llvm '
'-F lldbDataFormatters.OptionalSummaryProvider '
'-x "^llvm::Optional<.+>$"')
debugger.HandleCommand('type summary add -w llvm '
'-F lldbDataFormatters.SmallStringSummaryProvider '
'-x "^llvm::SmallString<.+>$"')
debugger.HandleCommand('type summary add -w llvm '
'-F lldbDataFormatters.StringRefSummaryProvider '
'-x "^llvm::StringRef$"')
debugger.HandleCommand('type summary add -w llvm '
'-F lldbDataFormatters.ConstStringSummaryProvider '
'-x "^lldb_private::ConstString$"')
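# Once the module is loaded (see the docstring above), commands such as
#   (lldb) frame variable MySmallVector
# will render llvm::SmallVector and friends via the providers below; the
# variable name here is only illustrative.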
# Pretty printer for llvm::SmallVector/llvm::SmallVectorImpl
class SmallVectorSynthProvider:
def __init__(self, valobj, dict):
        self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
return self.size.GetValueAsUnsigned(0)
def get_child_index(self, name):
        try:
            return int(name.lstrip('[').rstrip(']'))
        except ValueError:
            return -1
def get_child_at_index(self, index):
# Do bounds checking.
if index < 0:
return None
if index >= self.num_children():
            return None
offset = index * self.type_size
return self.begin.CreateChildAtOffset('['+str(index)+']',
offset, self.data_type)
def update(self):
self.begin = self.valobj.GetChildMemberWithName('BeginX')
self.size = self.valobj.GetChildMemberWithName('Size')
the_type = self.valobj.GetType()
# If this is a reference type we have to dereference it to get to the
# template parameter.
if the_type.IsReferenceType():
the_type = the_type.GetDereferencedType()
self.data_type = the_type.GetTemplateArgumentType(0)
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
class ArrayRefSynthProvider:
""" Provider for llvm::ArrayRef """
def __init__(self, valobj, dict):
        self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
return self.length
def get_child_index(self, name):
        try:
            return int(name.lstrip('[').rstrip(']'))
        except ValueError:
            return -1
def get_child_at_index(self, index):
if index < 0 or index >= self.num_children():
            return None
offset = index * self.type_size
return self.data.CreateChildAtOffset('[' + str(index) + ']',
offset, self.data_type)
def update(self):
self.data = self.valobj.GetChildMemberWithName('Data')
length_obj = self.valobj.GetChildMemberWithName('Length')
self.length = length_obj.GetValueAsUnsigned(0)
self.data_type = self.data.GetType().GetPointeeType()
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
def OptionalSummaryProvider(valobj, internal_dict):
storage = valobj.GetChildMemberWithName('Storage')
if not storage:
storage = valobj
failure = 2
hasVal = storage.GetChildMemberWithName('hasVal').GetValueAsUnsigned(failure)
if hasVal == failure:
return '<could not read llvm::Optional>'
if hasVal == 0:
return 'None'
underlying_type = storage.GetType().GetTemplateArgumentType(0)
storage = storage.GetChildMemberWithName('value')
return str(storage.Cast(underlying_type))
def SmallStringSummaryProvider(valobj, internal_dict):
num_elements = valobj.GetNumChildren()
res = "\""
for i in range(0, num_elements):
c = valobj.GetChildAtIndex(i).GetValue()
if c:
res += c.strip("'")
res += "\""
return res
def StringRefSummaryProvider(valobj, internal_dict):
if valobj.GetNumChildren() == 2:
# StringRef's are also used to point at binary blobs in memory,
# so filter out suspiciously long strings.
max_length = 256
length = valobj.GetChildAtIndex(1).GetValueAsUnsigned(max_length)
if length == 0:
return "NULL"
if length < max_length:
return valobj.GetChildAtIndex(0).GetSummary()
return ""
def ConstStringSummaryProvider(valobj, internal_dict):
if valobj.GetNumChildren() == 1:
return valobj.GetChildAtIndex(0).GetSummary()
return ""
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lldbDataFormatters.py |
#!/usr/bin/env python
"""
Summarize the information in the given coverage files.
Emits the number of rules covered or the percentage of rules covered depending
on whether --num-rules has been used to specify the total number of rules.
"""
from __future__ import print_function
import argparse
import struct
class FileFormatError(Exception):
pass
def backend_int_pair(s):
backend, sep, value = s.partition('=')
    if not sep:
        # str.partition returns an empty separator when '=' is absent.
        raise argparse.ArgumentTypeError("'=' missing, expected name=value")
if not backend:
raise argparse.ArgumentTypeError("Expected name=value")
if not value:
raise argparse.ArgumentTypeError("Expected name=value")
return backend, int(value)
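# For example, backend_int_pair('AArch64=1234') returns ('AArch64', 1234);
# the backend name and count here are illustrative.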
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('input', nargs='+')
parser.add_argument('--num-rules', type=backend_int_pair, action='append',
metavar='BACKEND=NUM',
help='Specify the number of rules for a backend')
args = parser.parse_args()
covered_rules = {}
for input_filename in args.input:
with open(input_filename, 'rb') as input_fh:
data = input_fh.read()
pos = 0
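        # The coverage file is assumed to be a sequence of records: a
        # NUL-terminated backend name followed by 64-bit rule ids, with the
        # all-ones id (2**64 - 1) terminating each record.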
while data:
            # Split on the NUL terminator (as bytes, since the file is
            # opened in binary mode).
            backend, _, data = data.partition(b'\0')
pos += len(backend)
pos += 1
if len(backend) == 0:
raise FileFormatError()
            backend, = struct.unpack("%ds" % len(backend), backend)
            # Decode so the printed name and dict keys are text on Python 3.
            backend = backend.decode('utf-8')
while data:
if len(data) < 8:
raise FileFormatError()
rule_id, = struct.unpack("Q", data[:8])
pos += 8
data = data[8:]
if rule_id == (2 ** 64) - 1:
break
covered_rules[backend] = covered_rules.get(backend, {})
covered_rules[backend][rule_id] = covered_rules[backend].get(rule_id, 0) + 1
num_rules = dict(args.num_rules)
for backend, rules_for_backend in covered_rules.items():
if backend in num_rules:
print("%s: %3.2f%% of rules covered" % (backend, float(len(rules_for_backend)) / num_rules[backend]) * 100)
else:
print("%s: %d rules covered" % (backend, len(rules_for_backend)))
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/llvm-gisel-cov.py |
#!/usr/bin/env python3
"""
This script:
- Builds clang with user-defined flags
- Uses that clang to build an instrumented clang, which can be used to collect
PGO samples
- Builds a user-defined set of sources (default: clang) to act as a
"benchmark" to generate a PGO profile
- Builds clang once more with the PGO profile generated above
This is a total of four clean builds of clang (by default). This may take a
while. :)
This scripts duplicates https://llvm.org/docs/AdvancedBuilds.html#multi-stage-pgo
Eventually, it will be updated to instead call the cmake cache mentioned there.
"""
import argparse
import collections
import multiprocessing
import os
import shlex
import shutil
import subprocess
import sys
### User configuration
# If you want to use a different 'benchmark' than building clang, make this
# function do what you want. out_dir is the build directory for clang, so all
# of the clang binaries will live under "${out_dir}/bin/". Using clang in
# ${out_dir} will magically have the profiles go to the right place.
#
# You may assume that out_dir is a freshly-built directory that you can reach
# in to build more things, if you'd like.
def _run_benchmark(env, out_dir, include_debug_info):
"""The 'benchmark' we run to generate profile data."""
target_dir = env.output_subdir('instrumentation_run')
# `check-llvm` and `check-clang` are cheap ways to increase coverage. The
# former lets us touch on the non-x86 backends a bit if configured, and the
# latter gives us more C to chew on (and will send us through diagnostic
# paths a fair amount, though the `if (stuff_is_broken) { diag() ... }`
# branches should still heavily be weighted in the not-taken direction,
# since we built all of LLVM/etc).
_build_things_in(env, out_dir, what=['check-llvm', 'check-clang'])
# Building tblgen gets us coverage; don't skip it. (out_dir may also not
# have them anyway, but that's less of an issue)
cmake = _get_cmake_invocation_for_bootstrap_from(
env, out_dir, skip_tablegens=False)
if include_debug_info:
cmake.add_flag('CMAKE_BUILD_TYPE', 'RelWithDebInfo')
_run_fresh_cmake(env, cmake, target_dir)
# Just build all the things. The more data we have, the better.
_build_things_in(env, target_dir, what=['all'])
### Script
class CmakeInvocation:
_cflags = ['CMAKE_C_FLAGS', 'CMAKE_CXX_FLAGS']
_ldflags = [
'CMAKE_EXE_LINKER_FLAGS',
'CMAKE_MODULE_LINKER_FLAGS',
'CMAKE_SHARED_LINKER_FLAGS',
]
def __init__(self, cmake, maker, cmake_dir):
self._prefix = [cmake, '-G', maker, cmake_dir]
# Map of str -> (list|str).
self._flags = {}
for flag in CmakeInvocation._cflags + CmakeInvocation._ldflags:
self._flags[flag] = []
def add_new_flag(self, key, value):
self.add_flag(key, value, allow_overwrites=False)
def add_flag(self, key, value, allow_overwrites=True):
if key not in self._flags:
self._flags[key] = value
return
existing_value = self._flags[key]
if isinstance(existing_value, list):
existing_value.append(value)
return
if not allow_overwrites:
raise ValueError('Invalid overwrite of %s requested' % key)
self._flags[key] = value
def add_cflags(self, flags):
# No, I didn't intend to append ['-', 'O', '2'] to my flags, thanks :)
assert not isinstance(flags, str)
for f in CmakeInvocation._cflags:
self._flags[f].extend(flags)
def add_ldflags(self, flags):
assert not isinstance(flags, str)
for f in CmakeInvocation._ldflags:
self._flags[f].extend(flags)
def to_args(self):
args = self._prefix.copy()
for key, value in sorted(self._flags.items()):
if isinstance(value, list):
# We preload all of the list-y values (cflags, ...). If we've
# nothing to add, don't.
if not value:
continue
value = ' '.join(value)
arg = '-D' + key
if value != '':
arg += '=' + value
args.append(arg)
return args
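    # For reference, to_args() yields something like (flag values illustrative):
    #   ['cmake', '-G', 'Ninja', '/path/to/llvm',
    #    '-DCMAKE_BUILD_TYPE=Release', '-DCMAKE_C_FLAGS=-O2 -g']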
class Env:
def __init__(self, llvm_dir, use_make, output_dir, default_cmake_args,
dry_run):
self.llvm_dir = llvm_dir
self.use_make = use_make
self.output_dir = output_dir
self.default_cmake_args = default_cmake_args.copy()
self.dry_run = dry_run
def get_default_cmake_args_kv(self):
return self.default_cmake_args.items()
def get_cmake_maker(self):
return 'Ninja' if not self.use_make else 'Unix Makefiles'
def get_make_command(self):
if self.use_make:
return ['make', '-j{}'.format(multiprocessing.cpu_count())]
return ['ninja']
def output_subdir(self, name):
return os.path.join(self.output_dir, name)
def has_llvm_subproject(self, name):
if name == 'compiler-rt':
subdir = '../compiler-rt'
elif name == 'clang':
subdir = '../clang'
else:
raise ValueError('Unknown subproject: %s' % name)
return os.path.isdir(os.path.join(self.llvm_dir, subdir))
# Note that we don't allow capturing stdout/stderr. This works quite nicely
# with dry_run.
def run_command(self,
cmd,
cwd=None,
check=False,
silent_unless_error=False):
print(
'Running `%s` in %s' % (cmd, shlex.quote(cwd or os.getcwd())))
if self.dry_run:
return
if silent_unless_error:
stdout, stderr = subprocess.PIPE, subprocess.STDOUT
else:
stdout, stderr = None, None
# Don't use subprocess.run because it's >= py3.5 only, and it's not too
# much extra effort to get what it gives us anyway.
popen = subprocess.Popen(
cmd,
stdin=subprocess.DEVNULL,
stdout=stdout,
stderr=stderr,
cwd=cwd)
stdout, _ = popen.communicate()
return_code = popen.wait(timeout=0)
if not return_code:
return
if silent_unless_error:
print(stdout.decode('utf-8', 'ignore'))
if check:
raise subprocess.CalledProcessError(
returncode=return_code, cmd=cmd, output=stdout, stderr=None)
def _get_default_cmake_invocation(env):
inv = CmakeInvocation(
cmake='cmake', maker=env.get_cmake_maker(), cmake_dir=env.llvm_dir)
for key, value in env.get_default_cmake_args_kv():
inv.add_new_flag(key, value)
return inv
def _get_cmake_invocation_for_bootstrap_from(env, out_dir,
skip_tablegens=True):
clang = os.path.join(out_dir, 'bin', 'clang')
cmake = _get_default_cmake_invocation(env)
cmake.add_new_flag('CMAKE_C_COMPILER', clang)
cmake.add_new_flag('CMAKE_CXX_COMPILER', clang + '++')
# We often get no value out of building new tblgens; the previous build
# should have them. It's still correct to build them, just slower.
def add_tablegen(key, binary):
path = os.path.join(out_dir, 'bin', binary)
# Check that this exists, since the user's allowed to specify their own
# stage1 directory (which is generally where we'll source everything
# from). Dry runs should hope for the best from our user, as well.
if env.dry_run or os.path.exists(path):
cmake.add_new_flag(key, path)
if skip_tablegens:
add_tablegen('LLVM_TABLEGEN', 'llvm-tblgen')
add_tablegen('CLANG_TABLEGEN', 'clang-tblgen')
return cmake
def _build_things_in(env, target_dir, what):
cmd = env.get_make_command() + what
env.run_command(cmd, cwd=target_dir, check=True)
def _run_fresh_cmake(env, cmake, target_dir):
if not env.dry_run:
try:
shutil.rmtree(target_dir)
except FileNotFoundError:
pass
os.makedirs(target_dir, mode=0o755)
cmake_args = cmake.to_args()
env.run_command(
cmake_args, cwd=target_dir, check=True, silent_unless_error=True)
def _build_stage1_clang(env):
target_dir = env.output_subdir('stage1')
cmake = _get_default_cmake_invocation(env)
_run_fresh_cmake(env, cmake, target_dir)
_build_things_in(env, target_dir, what=['clang', 'llvm-profdata', 'profile'])
return target_dir
def _generate_instrumented_clang_profile(env, stage1_dir, profile_dir,
output_file):
llvm_profdata = os.path.join(stage1_dir, 'bin', 'llvm-profdata')
if env.dry_run:
profiles = [os.path.join(profile_dir, '*.profraw')]
else:
profiles = [
os.path.join(profile_dir, f) for f in os.listdir(profile_dir)
if f.endswith('.profraw')
]
cmd = [llvm_profdata, 'merge', '-output=' + output_file] + profiles
env.run_command(cmd, check=True)
def _build_instrumented_clang(env, stage1_dir):
assert os.path.isabs(stage1_dir)
target_dir = os.path.join(env.output_dir, 'instrumented')
cmake = _get_cmake_invocation_for_bootstrap_from(env, stage1_dir)
cmake.add_new_flag('LLVM_BUILD_INSTRUMENTED', 'IR')
# libcxx's configure step messes with our link order: we'll link
# libclang_rt.profile after libgcc, and the former requires atexit from the
# latter. So, configure checks fail.
#
# Since we don't need libcxx or compiler-rt anyway, just disable them.
cmake.add_new_flag('LLVM_BUILD_RUNTIME', 'No')
_run_fresh_cmake(env, cmake, target_dir)
_build_things_in(env, target_dir, what=['clang', 'lld'])
profiles_dir = os.path.join(target_dir, 'profiles')
return target_dir, profiles_dir
def _build_optimized_clang(env, stage1_dir, profdata_file):
if not env.dry_run and not os.path.exists(profdata_file):
raise ValueError('Looks like the profdata file at %s doesn\'t exist' %
profdata_file)
target_dir = os.path.join(env.output_dir, 'optimized')
cmake = _get_cmake_invocation_for_bootstrap_from(env, stage1_dir)
cmake.add_new_flag('LLVM_PROFDATA_FILE', os.path.abspath(profdata_file))
# We'll get complaints about hash mismatches in `main` in tools/etc. Ignore
# it.
cmake.add_cflags(['-Wno-backend-plugin'])
_run_fresh_cmake(env, cmake, target_dir)
_build_things_in(env, target_dir, what=['clang'])
return target_dir
Args = collections.namedtuple('Args', [
'do_optimized_build',
'include_debug_info',
'profile_location',
'stage1_dir',
])
def _parse_args():
parser = argparse.ArgumentParser(
description='Builds LLVM and Clang with instrumentation, collects '
        'instrumentation profiles for them, and (optionally) builds things '
'with these PGO profiles. By default, it\'s assumed that you\'re '
'running this from your LLVM root, and all build artifacts will be '
'saved to $PWD/out.')
parser.add_argument(
'--cmake-extra-arg',
action='append',
default=[],
help='an extra arg to pass to all cmake invocations. Note that this '
'is interpreted as a -D argument, e.g. --cmake-extra-arg FOO=BAR will '
'be passed as -DFOO=BAR. This may be specified multiple times.')
parser.add_argument(
'--dry-run',
action='store_true',
help='print commands instead of running them')
parser.add_argument(
'--llvm-dir',
default='.',
help='directory containing an LLVM checkout (default: $PWD)')
parser.add_argument(
'--no-optimized-build',
action='store_true',
help='disable the final, PGO-optimized build')
parser.add_argument(
'--out-dir',
help='directory to write artifacts to (default: $llvm_dir/out)')
parser.add_argument(
'--profile-output',
help='where to output the profile (default is $out/pgo_profile.prof)')
parser.add_argument(
'--stage1-dir',
help='instead of having an initial build of everything, use the given '
'directory. It is expected that this directory will have clang, '
'llvm-profdata, and the appropriate libclang_rt.profile already built')
parser.add_argument(
'--use-debug-info-in-benchmark',
action='store_true',
        help='use a RelWithDebInfo build instead of a regular Release build in the benchmark. '
'This increases benchmark execution time and disk space requirements, '
'but gives more coverage over debuginfo bits in LLVM and clang.')
parser.add_argument(
'--use-make',
action='store_true',
default=shutil.which('ninja') is None,
help='use Makefiles instead of ninja')
args = parser.parse_args()
llvm_dir = os.path.abspath(args.llvm_dir)
if args.out_dir is None:
output_dir = os.path.join(llvm_dir, 'out')
else:
output_dir = os.path.abspath(args.out_dir)
extra_args = {'CMAKE_BUILD_TYPE': 'Release',
'LLVM_ENABLE_PROJECTS': 'clang;compiler-rt;lld'}
for arg in args.cmake_extra_arg:
if arg.startswith('-D'):
arg = arg[2:]
elif arg.startswith('-'):
raise ValueError('Unknown not- -D arg encountered; you may need '
'to tweak the source...')
split = arg.split('=', 1)
if len(split) == 1:
key, val = split[0], ''
else:
key, val = split
extra_args[key] = val
env = Env(
default_cmake_args=extra_args,
dry_run=args.dry_run,
llvm_dir=llvm_dir,
output_dir=output_dir,
use_make=args.use_make,
)
if args.profile_output is not None:
profile_location = args.profile_output
else:
profile_location = os.path.join(env.output_dir, 'pgo_profile.prof')
result_args = Args(
do_optimized_build=not args.no_optimized_build,
include_debug_info=args.use_debug_info_in_benchmark,
profile_location=profile_location,
stage1_dir=args.stage1_dir,
)
return env, result_args
def _looks_like_llvm_dir(directory):
"""Arbitrary set of heuristics to determine if `directory` is an llvm dir.
Errs on the side of false-positives."""
contents = set(os.listdir(directory))
expected_contents = [
'CODE_OWNERS.TXT',
'cmake',
'docs',
'include',
'utils',
]
if not all(c in contents for c in expected_contents):
return False
try:
include_listing = os.listdir(os.path.join(directory, 'include'))
except NotADirectoryError:
return False
return 'llvm' in include_listing
def _die(*args, **kwargs):
kwargs['file'] = sys.stderr
print(*args, **kwargs)
sys.exit(1)
def _main():
env, args = _parse_args()
if not _looks_like_llvm_dir(env.llvm_dir):
_die('Looks like %s isn\'t an LLVM directory; please see --help' %
env.llvm_dir)
if not env.has_llvm_subproject('clang'):
        _die('Need a clang checkout next to the LLVM dir (../clang)')
if not env.has_llvm_subproject('compiler-rt'):
        _die('Need a compiler-rt checkout next to the LLVM dir (../compiler-rt)')
def status(*args):
print(*args, file=sys.stderr)
if args.stage1_dir is None:
status('*** Building stage1 clang...')
stage1_out = _build_stage1_clang(env)
else:
stage1_out = args.stage1_dir
status('*** Building instrumented clang...')
instrumented_out, profile_dir = _build_instrumented_clang(env, stage1_out)
status('*** Running profdata benchmarks...')
_run_benchmark(env, instrumented_out, args.include_debug_info)
status('*** Generating profile...')
_generate_instrumented_clang_profile(env, stage1_out, profile_dir,
args.profile_location)
print('Final profile:', args.profile_location)
if args.do_optimized_build:
status('*** Building PGO-optimized binaries...')
optimized_out = _build_optimized_clang(env, stage1_out,
args.profile_location)
print('Final build directory:', optimized_out)
if __name__ == '__main__':
_main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/collect_and_build_with_pgo.py |
#!/usr/bin/env python3
"""A script to generate FileCheck statements for 'opt' regression tests.
This script is a utility to update LLVM opt test cases with new
FileCheck patterns. It can either update all of the tests in the file or
a single test function.
Example usage:
$ update_test_checks.py --opt=../bin/opt test/foo.ll
Workflow:
1. Make a compiler patch that requires updating some number of FileCheck lines
in regression test files.
2. Save the patch and revert it from your local work area.
3. Update the RUN-lines in the affected regression tests to look canonical.
Example: "; RUN: opt < %s -instcombine -S | FileCheck %s"
4. Refresh the FileCheck lines for either the entire file or select functions by
running this script.
5. Commit the fresh baseline of checks.
6. Apply your patch from step 1 and rebuild your local binaries.
7. Re-run this script on affected regression tests.
8. Check the diffs to ensure the script has done something reasonable.
9. Submit a patch including the regression test diffs for review.
A common pattern is to have the script insert complete checking of every
instruction. Then, edit it down to only check the relevant instructions.
The script is designed to make adding checks to a test case fast; it is *not*
designed to be authoritative about what constitutes a good test!
"""
from __future__ import print_function
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import re
import sys
from UpdateTestChecks import common
def main():
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument('--opt-binary', default='opt',
help='The opt binary used to generate the test case')
parser.add_argument(
'--function', help='The function in the test file to update')
parser.add_argument('-p', '--preserve-names', action='store_true',
help='Do not scrub IR names')
parser.add_argument('--function-signature', action='store_true',
help='Keep function signature information around for the check line')
parser.add_argument('--scrub-attributes', action='store_true',
help='Remove attribute annotations (#0) from the end of check line')
parser.add_argument('--check-attributes', action='store_true',
help='Check "Function Attributes" for functions')
parser.add_argument('tests', nargs='+')
initial_args = common.parse_commandline_args(parser)
script_name = os.path.basename(__file__)
opt_basename = os.path.basename(initial_args.opt_binary)
if not re.match(r'^opt(-\d+)?(\.exe)?$', opt_basename):
common.error('Unexpected opt name: ' + opt_basename)
sys.exit(1)
opt_basename = 'opt'
for ti in common.itertests(initial_args.tests, parser,
script_name='utils/' + script_name):
# If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces
if ti.args.scrub_attributes:
common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE
else:
common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_RE
prefix_list = []
for l in ti.run_lines:
if '|' not in l:
common.warn('Skipping unparseable RUN line: ' + l)
continue
(tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
common.verify_filecheck_prefixes(filecheck_cmd)
if not tool_cmd.startswith(opt_basename + ' '):
common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l))
continue
if not filecheck_cmd.startswith('FileCheck '):
common.warn('Skipping non-FileChecked RUN line: ' + l)
continue
tool_cmd_args = tool_cmd[len(opt_basename):].strip()
tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [item for m in
common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args))
global_vars_seen_dict = {}
builder = common.FunctionTestBuilder(
run_list=prefix_list,
flags=ti.args,
scrubber_args=[])
for prefixes, opt_args in prefix_list:
common.debug('Extracted opt cmd: ' + opt_basename + ' ' + opt_args)
common.debug('Extracted FileCheck prefixes: ' + str(prefixes))
raw_tool_output = common.invoke_tool(ti.args.opt_binary, opt_args,
ti.path)
builder.process_run_line(common.OPT_FUNCTION_RE, common.scrub_body,
raw_tool_output, prefixes)
func_dict = builder.finish_and_get_func_dict()
is_in_function = False
is_in_function_start = False
prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
common.debug('Rewriting FileCheck prefixes:', str(prefix_set))
output_lines = []
include_generated_funcs = common.find_arg_in_test(ti,
lambda args: ti.args.include_generated_funcs,
'--include-generated-funcs',
True)
if include_generated_funcs:
# Generate the appropriate checks for each function. We need to emit
# these in the order according to the generated output so that CHECK-LABEL
# works properly. func_order provides that.
# We can't predict where various passes might insert functions so we can't
# be sure the input function order is maintained. Therefore, first spit
# out all the source lines.
common.dump_input_lines(output_lines, ti, prefix_set, ';')
# Now generate all the checks.
common.add_checks_at_end(output_lines, prefix_list, builder.func_order(),
';', lambda my_output_lines, prefixes, func:
common.add_ir_checks(my_output_lines, ';',
prefixes,
func_dict, func, False,
ti.args.function_signature,
global_vars_seen_dict))
else:
# "Normal" mode.
for input_line_info in ti.iterlines(output_lines):
input_line = input_line_info.line
args = input_line_info.args
if is_in_function_start:
if input_line == '':
continue
if input_line.lstrip().startswith(';'):
m = common.CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
common.add_ir_checks(output_lines, ';', prefix_list, func_dict,
func_name, args.preserve_names, args.function_signature,
global_vars_seen_dict)
is_in_function_start = False
if is_in_function:
if common.should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r' ', input_line)
output_lines.append(input_line)
else:
continue
if input_line.strip() == '}':
is_in_function = False
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = common.IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if args.function is not None and func_name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))
with open(ti.path, 'wb') as f:
f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/update_test_checks.py |
#!/usr/bin/env python
"""A tool for extracting a list of symbols to export
When exporting symbols from a dll or exe we either need to mark the symbols in
the source code as __declspec(dllexport) or supply a list of symbols to the
linker. This program automates the latter by inspecting the symbol tables of a
list of link inputs and deciding which of those symbols need to be exported.
We can't just export all the defined symbols, as there's a limit of 65535
exported symbols and in clang we go way over that, particularly in a debug
build. Therefore a large part of the work is pruning symbols either which can't
be imported, or which we think are things that have definitions in public header
files (i.e. template instantiations) and we would get defined in the thing
importing these symbols anyway.
"""
from __future__ import print_function
import sys
import re
import os
import subprocess
import multiprocessing
import argparse
# Define functions which extract a list of symbols from a library using several
# different tools. We use subprocess.Popen and yield a symbol at a time instead
# of using subprocess.check_output and returning a list as, especially on
# Windows, waiting for the entire output to be ready can take a significant
# amount of time.
def dumpbin_get_symbols(lib):
process = subprocess.Popen(['dumpbin','/symbols',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# Look for external symbols that are defined in some section
match = re.match("^.+SECT.+External\s+\|\s+(\S+).*$", line)
if match:
yield match.group(1)
process.wait()
def nm_get_symbols(lib):
if sys.platform.startswith('aix'):
process = subprocess.Popen(['nm','-P','-Xany','-C','-p',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
else:
process = subprocess.Popen(['nm','-P',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# Look for external symbols that are defined in some section
match = re.match("^(\S+)\s+[BDGRSTVW]\s+\S+\s+\S+$", line)
if match:
yield match.group(1)
process.wait()
def readobj_get_symbols(lib):
process = subprocess.Popen(['llvm-readobj','-symbols',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# When looking through the output of llvm-readobj we expect to see Name,
# Section, then StorageClass, so record Name and Section when we see
# them and decide if this is a defined external symbol when we see
# StorageClass.
match = re.search('Name: (\S+)', line)
if match:
name = match.group(1)
match = re.search('Section: (\S+)', line)
if match:
section = match.group(1)
match = re.search('StorageClass: (\S+)', line)
if match:
storageclass = match.group(1)
if section != 'IMAGE_SYM_ABSOLUTE' and \
section != 'IMAGE_SYM_UNDEFINED' and \
storageclass == 'External':
yield name
process.wait()
# Define functions which determine if the target is 32-bit Windows (as that's
# where calling convention name decoration happens).
def dumpbin_is_32bit_windows(lib):
# dumpbin /headers can output a huge amount of data (>100MB in a debug
# build) so we read only up to the 'machine' line then close the output.
process = subprocess.Popen(['dumpbin','/headers',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
retval = False
for line in process.stdout:
match = re.match('.+machine \((\S+)\)', line)
if match:
retval = (match.group(1) == 'x86')
break
process.stdout.close()
process.wait()
return retval
def objdump_is_32bit_windows(lib):
output = subprocess.check_output(['objdump','-f',lib],
universal_newlines=True)
    for line in output.splitlines():
match = re.match('.+file format (\S+)', line)
if match:
return (match.group(1) == 'pe-i386')
return False
def readobj_is_32bit_windows(lib):
output = subprocess.check_output(['llvm-readobj','-file-headers',lib],
universal_newlines=True)
    for line in output.splitlines():
match = re.match('Format: (\S+)', line)
if match:
return (match.group(1) == 'COFF-i386')
return False
# MSVC mangles names to ?<identifier_mangling>@<type_mangling>. By examining the
# identifier/type mangling we can decide which symbols could possibly be
# required and which we can discard.
def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
# Keep unmangled (i.e. extern "C") names
if not '?' in symbol:
if calling_convention_decoration:
# Remove calling convention decoration from names
match = re.match('[_@]([^@]+)', symbol)
if match:
return match.group(1)
return symbol
# Function template instantiations start with ?$; keep the instantiations of
    # clang::Type::getAs, as some of them are explicit specializations that are
# defined in clang's lib/AST/Type.cpp; discard the rest as it's assumed that
# the definition is public
elif re.match('\?\?\$getAs@.+@Type@clang@@', symbol):
return symbol
elif symbol.startswith('??$'):
return None
# Deleting destructors start with ?_G or ?_E and can be discarded because
# link.exe gives you a warning telling you they can't be exported if you
# don't
elif symbol.startswith('??_G') or symbol.startswith('??_E'):
return None
# Constructors (?0) and destructors (?1) of templates (?$) are assumed to be
# defined in headers and not required to be kept
elif symbol.startswith('??0?$') or symbol.startswith('??1?$'):
return None
# An anonymous namespace is mangled as ?A(maybe hex number)@. Any symbol
# that mentions an anonymous namespace can be discarded, as the anonymous
# namespace doesn't exist outside of that translation unit.
elif re.search('\?A(0x\w+)?@', symbol):
return None
# Keep mangled llvm:: and clang:: function symbols. How we detect these is a
# bit of a mess and imprecise, but that avoids having to completely demangle
# the symbol name. The outermost namespace is at the end of the identifier
# mangling, and the identifier mangling is followed by the type mangling, so
# we look for (llvm|clang)@@ followed by something that looks like a
# function type mangling. To spot a function type we use (this is derived
# from clang/lib/AST/MicrosoftMangle.cpp):
# <function-type> ::= <function-class> <this-cvr-qualifiers>
# <calling-convention> <return-type>
# <argument-list> <throw-spec>
# <function-class> ::= [A-Z]
# <this-cvr-qualifiers> ::= [A-Z0-9_]*
# <calling-convention> ::= [A-JQ]
# <return-type> ::= .+
# <argument-list> ::= X (void)
# ::= .+@ (list of types)
# ::= .*Z (list of types, varargs)
# <throw-spec> ::= exceptions are not allowed
elif re.search('(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$', symbol):
return symbol
return None
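# For instance (symbol names illustrative): an extern "C" name such as
# '_foo@4' on 32-bit Windows is kept (as 'foo', with the calling convention
# decoration removed), while a deleting destructor such as '??_GFoo@@...'
# is discarded.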
# Itanium manglings are of the form _Z<identifier_mangling><type_mangling>. We
# demangle the identifier mangling to identify symbols that can be safely
# discarded.
def should_keep_itanium_symbol(symbol, calling_convention_decoration):
# Start by removing any calling convention decoration (which we expect to
# see on all symbols, even mangled C++ symbols)
if calling_convention_decoration and symbol.startswith('_'):
symbol = symbol[1:]
# Keep unmangled names
if not symbol.startswith('_') and not symbol.startswith('.'):
return symbol
# Discard manglings that aren't nested names
match = re.match('_Z(T[VTIS])?(N.+)', symbol)
if not match:
return None
# Demangle the name. If the name is too complex then we don't need to keep
    # it, but if the demangling fails then keep the symbol just in case.
try:
names, _ = parse_itanium_nested_name(match.group(2))
except TooComplexName:
return None
if not names:
return symbol
# Constructors and destructors of templates classes are assumed to be
# defined in headers and not required to be kept
if re.match('[CD][123]', names[-1][0]) and names[-2][1]:
return None
# Keep the instantiations of clang::Type::getAs, as some of them are
    # explicit specializations that are defined in clang's lib/AST/Type.cpp;
# discard any other function template instantiations as it's assumed that
# the definition is public
elif symbol.startswith('_ZNK5clang4Type5getAs'):
return symbol
elif names[-1][1]:
return None
# Keep llvm:: and clang:: names
elif names[0][0] == '4llvm' or names[0][0] == '5clang':
return symbol
# Discard everything else
else:
return None
# Certain kinds of complex manglings we assume cannot be part of a public
# interface, and we handle them by raising an exception.
class TooComplexName(Exception):
pass
# Parse an itanium mangled name from the start of a string and return a
# (name, rest of string) pair.
def parse_itanium_name(arg):
# Check for a normal name
match = re.match('(\d+)(.+)', arg)
if match:
n = int(match.group(1))
name = match.group(1)+match.group(2)[:n]
rest = match.group(2)[n:]
return name, rest
# Check for constructor/destructor names
match = re.match('([CD][123])(.+)', arg)
if match:
return match.group(1), match.group(2)
# Assume that a sequence of characters that doesn't end a nesting is an
# operator (this is very imprecise, but appears to be good enough)
match = re.match('([^E]+)(.+)', arg)
if match:
return match.group(1), match.group(2)
# Anything else: we can't handle it
return None, arg
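# Example: parse_itanium_name('4llvmE') returns ('4llvm', 'E'), and
# parse_itanium_name('C1E') returns ('C1', 'E').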
# Parse an itanium mangled template argument list from the start of a string
# and throw it away, returning the rest of the string.
def skip_itanium_template(arg):
# A template argument list starts with I
assert arg.startswith('I'), arg
tmp = arg[1:]
while tmp:
# Check for names
match = re.match('(\d+)(.+)', tmp)
if match:
n = int(match.group(1))
tmp = match.group(2)[n:]
continue
# Check for substitutions
match = re.match('S[A-Z0-9]*_(.+)', tmp)
if match:
tmp = match.group(1)
# Start of a template
elif tmp.startswith('I'):
tmp = skip_itanium_template(tmp)
# Start of a nested name
elif tmp.startswith('N'):
_, tmp = parse_itanium_nested_name(tmp)
# Start of an expression: assume that it's too complicated
elif tmp.startswith('L') or tmp.startswith('X'):
raise TooComplexName
# End of the template
elif tmp.startswith('E'):
return tmp[1:]
# Something else: probably a type, skip it
else:
tmp = tmp[1:]
return None
# Parse an itanium mangled nested name and transform it into a list of pairs of
# (name, is_template), returning (list, rest of string).
def parse_itanium_nested_name(arg):
# A nested name starts with N
assert arg.startswith('N'), arg
ret = []
# Skip past the N, and possibly a substitution
match = re.match('NS[A-Z0-9]*_(.+)', arg)
if match:
tmp = match.group(1)
else:
tmp = arg[1:]
# Skip past CV-qualifiers and ref qualifiers
    match = re.match('[rVKRO]*(.+)', tmp)
if match:
tmp = match.group(1)
# Repeatedly parse names from the string until we reach the end of the
# nested name
while tmp:
# An E ends the nested name
if tmp.startswith('E'):
return ret, tmp[1:]
# Parse a name
name_part, tmp = parse_itanium_name(tmp)
if not name_part:
# If we failed then we don't know how to demangle this
return None, None
is_template = False
        # If this name is a template, record that and skip the template
        # arguments
if tmp.startswith('I'):
tmp = skip_itanium_template(tmp)
is_template = True
# Add the name to the list
ret.append((name_part, is_template))
# If we get here then something went wrong
return None, None
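# Example: for a symbol naming llvm::Foo, parse_itanium_nested_name('N4llvm3FooE')
# returns ([('4llvm', False), ('3Foo', False)], '').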
def extract_symbols(arg):
get_symbols, should_keep_symbol, calling_convention_decoration, lib = arg
symbols = dict()
for symbol in get_symbols(lib):
symbol = should_keep_symbol(symbol, calling_convention_decoration)
if symbol:
symbols[symbol] = 1 + symbols.setdefault(symbol,0)
return symbols
if __name__ == '__main__':
tool_exes = ['dumpbin','nm','objdump','llvm-readobj']
parser = argparse.ArgumentParser(
description='Extract symbols to export from libraries')
parser.add_argument('--mangling', choices=['itanium','microsoft'],
required=True, help='expected symbol mangling scheme')
parser.add_argument('--tools', choices=tool_exes, nargs='*',
help='tools to use to extract symbols and determine the'
' target')
parser.add_argument('libs', metavar='lib', type=str, nargs='+',
help='libraries to extract symbols from')
parser.add_argument('-o', metavar='file', type=str, help='output to file')
args = parser.parse_args()
# Determine the function to use to get the list of symbols from the inputs,
# and the function to use to determine if the target is 32-bit windows.
tools = { 'dumpbin' : (dumpbin_get_symbols, dumpbin_is_32bit_windows),
'nm' : (nm_get_symbols, None),
'objdump' : (None, objdump_is_32bit_windows),
'llvm-readobj' : (readobj_get_symbols, readobj_is_32bit_windows) }
get_symbols = None
is_32bit_windows = None
# If we have a tools argument then use that for the list of tools to check
if args.tools:
tool_exes = args.tools
# Find a tool to use by trying each in turn until we find one that exists
# (subprocess.call will throw OSError when the program does not exist)
get_symbols = None
for exe in tool_exes:
try:
# Close std streams as we don't want any output and we don't
# want the process to wait for something on stdin.
p = subprocess.Popen([exe], stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
universal_newlines=True)
p.stdout.close()
p.stderr.close()
p.stdin.close()
p.wait()
# Keep going until we have a tool to use for both get_symbols and
# is_32bit_windows
if not get_symbols:
get_symbols = tools[exe][0]
if not is_32bit_windows:
is_32bit_windows = tools[exe][1]
if get_symbols and is_32bit_windows:
break
except OSError:
continue
if not get_symbols:
print("Couldn't find a program to read symbols with", file=sys.stderr)
exit(1)
if not is_32bit_windows:
print("Couldn't find a program to determining the target", file=sys.stderr)
exit(1)
# How we determine which symbols to keep and which to discard depends on
# the mangling scheme
if args.mangling == 'microsoft':
should_keep_symbol = should_keep_microsoft_symbol
else:
should_keep_symbol = should_keep_itanium_symbol
# Get the list of libraries to extract symbols from
libs = list()
for lib in args.libs:
# When invoked by cmake the arguments are the cmake target names of the
# libraries, so we need to add .lib/.a to the end and maybe lib to the
# start to get the filename. Also allow objects.
suffixes = ['.lib','.a','.obj','.o']
if not any([lib.endswith(s) for s in suffixes]):
for s in suffixes:
if os.path.exists(lib+s):
lib = lib+s
break
if os.path.exists('lib'+lib+s):
lib = 'lib'+lib+s
break
if not any([lib.endswith(s) for s in suffixes]):
print("Don't know what to do with argument "+lib, file=sys.stderr)
exit(1)
libs.append(lib)
# Check if calling convention decoration is used by inspecting the first
# library in the list
calling_convention_decoration = is_32bit_windows(libs[0])
# Extract symbols from libraries in parallel. This is a huge time saver when
# doing a debug build, as there are hundreds of thousands of symbols in each
# library.
pool = multiprocessing.Pool()
try:
# Only one argument can be passed to the mapping function, and we can't
# use a lambda or local function definition as that doesn't work on
# windows, so create a list of tuples which duplicates the arguments
# that are the same in all calls.
vals = [(get_symbols, should_keep_symbol, calling_convention_decoration, x) for x in libs]
# Do an async map then wait for the result to make sure that
# KeyboardInterrupt gets caught correctly (see
# http://bugs.python.org/issue8296)
result = pool.map_async(extract_symbols, vals)
pool.close()
libs_symbols = result.get(3600)
except KeyboardInterrupt:
# On Ctrl-C terminate everything and exit
pool.terminate()
pool.join()
exit(1)
# Merge everything into a single dict
symbols = dict()
for this_lib_symbols in libs_symbols:
for k,v in list(this_lib_symbols.items()):
symbols[k] = v + symbols.setdefault(k,0)
# Count instances of member functions of template classes, and map the
# symbol name to the function+class. We do this under the assumption that if
# a member function of a template class is instantiated many times it's
# probably declared in a public header file.
template_function_count = dict()
template_function_mapping = dict()
template_function_count[""] = 0
for k in symbols:
name = None
if args.mangling == 'microsoft':
# Member functions of templates start with
# ?<fn_name>@?$<class_name>@, so we map to <fn_name>@?$<class_name>.
# As manglings go from the innermost scope to the outermost scope
# this means:
# * When we have a function member of a subclass of a template
# class then <fn_name> will actually contain the mangling of
# both the subclass and the function member. This is fine.
# * When we have a function member of a template subclass of a
# (possibly template) class then it's the innermost template
# subclass that becomes <class_name>. This should be OK so long
# as we don't have multiple classes with a template subclass of
# the same name.
match = re.search("^\?(\??\w+\@\?\$\w+)\@", k)
if match:
name = match.group(1)
else:
# Find member functions of templates by demangling the name and
# checking if the second-to-last name in the list is a template.
match = re.match('_Z(T[VTIS])?(N.+)', k)
if match:
try:
names, _ = parse_itanium_nested_name(match.group(2))
if names and names[-2][1]:
name = ''.join([x for x,_ in names])
except TooComplexName:
# Manglings that are too complex should already have been
# filtered out, but if we happen to somehow see one here
# just leave it as-is.
pass
if name:
old_count = template_function_count.setdefault(name,0)
template_function_count[name] = old_count + 1
template_function_mapping[k] = name
else:
template_function_mapping[k] = ""
# Print symbols which both:
# * Appear in exactly one input, as symbols defined in multiple
# objects/libraries are assumed to have public definitions.
# * Aren't instances of member functions of templates which have been
# instantiated 100 times or more, which are assumed to have public
# definitions. (100 is an arbitrary guess here.)
if args.o:
outfile = open(args.o,'w')
else:
outfile = sys.stdout
for k,v in list(symbols.items()):
template_count = template_function_count[template_function_mapping[k]]
if v == 1 and template_count < 100:
print(k, file=outfile)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/extract_symbols.py |
#!/usr/bin/env python
"""A tool for looking for indirect jumps and calls in x86 binaries.
Helpful to verify whether or not retpoline mitigations are catching
all of the indirect branches in a binary and telling you which
functions the remaining ones are in (assembly, etc).
Depends on llvm-objdump being in your path and is tied to the
dump format.
"""
from __future__ import print_function
import os
import sys
import re
import subprocess
import optparse
# Look for indirect calls/jmps in a binary. re: (call|jmp).*\*
def look_for_indirect(file):
args = ['llvm-objdump']
args.extend(["-d"])
args.extend([file])
    # universal_newlines so the output is text (not bytes) on Python 3.
    p = subprocess.Popen(args=args, stdin=None, stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
function = ""
for line in stdout.splitlines():
        if not line.startswith(' '):
function = line
result = re.search('(call|jmp).*\*', line)
        if result is not None:
# TODO: Perhaps use cxxfilt to demangle functions?
print(function)
print(line)
return
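# An indirect branch in the disassembly looks something like (illustrative):
#   4004f6: ff d0   callq *%rax
# which the regular expression above matches via the '*' operand.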
def main(args):
# No options currently other than the binary.
parser = optparse.OptionParser("%prog [options] <binary>")
(opts, args) = parser.parse_args(args)
if len(args) != 2:
parser.error("invalid number of arguments: %s" % len(args))
look_for_indirect(args[1])
if __name__ == '__main__':
main(sys.argv)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/indirect_calls.py |
#!/usr/bin/env python
"""Script to sort the top-most block of #include lines.
Assumes the LLVM coding conventions.
Currently, this script only bothers sorting the llvm/... headers. Patches
welcome for more functionality, and sorting other header groups.
"""
import argparse
import os
def sort_includes(f):
"""Sort the #include lines of a specific file."""
# Skip files which are under INPUTS trees or test trees.
if 'INPUTS/' in f.name or 'test/' in f.name:
return
ext = os.path.splitext(f.name)[1]
if ext not in ['.cpp', '.c', '.h', '.inc', '.def']:
return
lines = f.readlines()
look_for_api_header = ext in ['.cpp', '.c']
found_headers = False
headers_begin = 0
headers_end = 0
api_headers = []
local_headers = []
subproject_headers = []
llvm_headers = []
system_headers = []
for (i, l) in enumerate(lines):
if l.strip() == '':
continue
if l.startswith('#include'):
if not found_headers:
headers_begin = i
found_headers = True
headers_end = i
header = l[len('#include'):].lstrip()
if look_for_api_header and header.startswith('"'):
api_headers.append(header)
look_for_api_header = False
continue
if (header.startswith('<') or header.startswith('"gtest/') or
header.startswith('"isl/') or header.startswith('"json/')):
system_headers.append(header)
continue
if (header.startswith('"clang/') or header.startswith('"clang-c/') or
header.startswith('"polly/')):
subproject_headers.append(header)
continue
if (header.startswith('"llvm/') or header.startswith('"llvm-c/')):
llvm_headers.append(header)
continue
local_headers.append(header)
continue
# Only allow comments and #defines prior to any includes. If either are
# mixed with includes, the order might be sensitive.
if found_headers:
break
if l.startswith('//') or l.startswith('#define') or l.startswith('#ifndef'):
continue
break
if not found_headers:
return
local_headers = sorted(set(local_headers))
subproject_headers = sorted(set(subproject_headers))
llvm_headers = sorted(set(llvm_headers))
system_headers = sorted(set(system_headers))
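    # Assemble the final ordering: the file's own API header first, then
    # local headers, subproject headers (clang/polly), llvm headers, and
    # finally system headers.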
headers = api_headers + local_headers + subproject_headers + llvm_headers + system_headers
header_lines = ['#include ' + h for h in headers]
lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
f.seek(0)
f.truncate()
f.writelines(lines)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
help='the source files to sort includes within')
args = parser.parse_args()
for f in args.files:
sort_includes(f)
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/sort_includes.py |
#!/usr/bin/env python
"""
Helper script to convert the log generated by '-debug-only=constraint-system'
to a Python script that uses Z3 to verify the decisions using Z3's Python API.
Example usage:
> cat path/to/file.log
---
x6 + -1 * x7 <= -1
x6 + -1 * x7 <= -2
sat
> ./convert-constraint-log-to-z3.py path/to/file.log > check.py && python ./check.py
> cat check.py
from z3 import *
x3 = Int("x3")
x1 = Int("x1")
x2 = Int("x2")
s = Solver()
s.add(x1 + -1 * x2 <= 0)
s.add(x2 + -1 * x3 <= 0)
s.add(-1 * x1 + x3 <= -1)
assert(s.check() == unsat)
print('all checks passed')
"""
import argparse
import re
def main():
parser = argparse.ArgumentParser(
description='Convert constraint log to script to verify using Z3.')
parser.add_argument('log_file', metavar='log', type=str,
help='constraint-system log file')
args = parser.parse_args()
content = ''
with open(args.log_file, 'rt') as f:
content = f.read()
groups = content.split('---')
var_re = re.compile('x\d+')
print('from z3 import *')
for group in groups:
constraints = [g.strip() for g in group.split('\n') if g.strip() != '']
variables = set()
for c in constraints[:-1]:
for m in var_re.finditer(c):
variables.add(m.group())
if len(variables) == 0:
continue
for v in variables:
print('{} = Int("{}")'.format(v, v))
print('s = Solver()')
for c in constraints[:-1]:
print('s.add({})'.format(c))
expected = constraints[-1].strip()
print('assert(s.check() == {})'.format(expected))
print('print("all checks passed")')
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/convert-constraint-log-to-z3.py |
#!/usr/bin/env python
"""
Unicode case folding database conversion utility
Parses the database and generates a C++ function which implements the case
folding algorithm. The database entries are of the form:
<code>; <status>; <mapping>; # <name>
<status> can be one of four characters:
C - Common mappings
S - mappings for Simple case folding
F - mappings for Full case folding
T - special case for Turkish I characters
Right now this generates a function which implements simple case folding (C+S
entries).
"""
from __future__ import print_function
import sys
import re
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# This variable will hold the body of the generated mappings function.
body = ""
# Reads the file line-by-line, extracts Common and Simple case fold mappings
# and yields (from_char, to_char, from_name) tuples.
def mappings(f):
previous_from = -1
expr = re.compile(r'^(.*); [CS]; (.*); # (.*)')
for line in f:
m = expr.match(line)
if not m: continue
from_char = int(m.group(1), 16)
to_char = int(m.group(2), 16)
from_name = m.group(3)
if from_char <= previous_from:
raise Exception("Duplicate or unsorted characters in input")
yield from_char, to_char, from_name
previous_from = from_char
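# For example, the database line
#   0041; C; 0061; # LATIN CAPITAL LETTER A
# yields (0x41, 0x61, 'LATIN CAPITAL LETTER A').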
# Computes the shift (to_char - from_char) in a mapping.
def shift(mapping):
return mapping[1] - mapping[0]
# Computes the stride (from_char2 - from_char1) of two mappings.
def stride2(mapping1, mapping2):
return mapping2[0] - mapping1[0]
# Computes the stride of a list of mappings. The list should have at least two
# mappings. All mappings in the list are assumed to have the same stride.
def stride(block):
return stride2(block[0], block[1])
# b is a list of mappings. All the mappings are assumed to have the same
# shift and the stride between adjacent mappings (if any) is constant.
def dump_block(b):
global body
if len(b) == 1:
# Special case for handling blocks of length 1. We don't even need to
# emit the "if (C < X) return C" check below as all characters in this
# range will be caught by the "C < X" check emitted by the first
# non-trivial block.
body += " // {2}\n if (C == {0:#06x})\n return {1:#06x};\n".format(*b[0])
return
first = b[0][0]
last = first + stride(b) * (len(b)-1)
modulo = first % stride(b)
# All characters before this block map to themselves.
body += " if (C < {0:#06x})\n return C;\n".format(first)
body += " // {0} characters\n".format(len(b))
# Generic pattern: check upper bound (lower bound is checked by the "if"
# above) and modulo of C, return C+shift.
pattern = " if (C <= {0:#06x} && C % {1} == {2})\n return C + {3};\n"
if stride(b) == 2 and shift(b[0]) == 1 and modulo == 0:
# Special case:
# We can elide the modulo-check because the expression "C|1" will map
# the intervening characters to themselves.
pattern = " if (C <= {0:#06x})\n return C | 1;\n"
elif stride(b) == 1:
# Another special case: X % 1 is always zero, so don't emit the
# modulo-check.
pattern = " if (C <= {0:#06x})\n return C + {3};\n"
body += pattern.format(last, stride(b), modulo, shift(b[0]))
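# For the ASCII uppercase block (0x41..0x5a, shift +32, stride 1) this emits:
#   if (C < 0x0041)
#     return C;
#   // 26 characters
#   if (C <= 0x005a)
#     return C + 32;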
current_block = []
f = urlopen(sys.argv[1])
for m in mappings(f):
if len(current_block) == 0:
current_block.append(m)
continue
if shift(current_block[0]) != shift(m):
# Incompatible shift, start a new block.
dump_block(current_block)
current_block = [m]
continue
if len(current_block) == 1 or stride(current_block) == stride2(current_block[-1], m):
current_block.append(m)
continue
# Incompatible stride, start a new block.
dump_block(current_block)
current_block = [m]
f.close()
dump_block(current_block)
print('//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//')
print('//')
print('// This file was generated by utils/unicode-case-fold.py from the Unicode')
print('// case folding database at')
print('// ', sys.argv[1])
print('//')
print('// To regenerate this file, run:')
print('// utils/unicode-case-fold.py \\')
print('// "{}" \\'.format(sys.argv[1]))
print('// > lib/Support/UnicodeCaseFold.cpp')
print('//')
print('//===----------------------------------------------------------------------===//')
print('')
print('#include "llvm/Support/Unicode.h"')
print('')
print("int llvm::sys::unicode::foldCharSimple(int C) {")
print(body)
print(" return C;")
print("}")
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/unicode-case-fold.py |
#!/usr/bin/env python3
"""A script to generate FileCheck statements for 'opt' analysis tests.
This script is a utility to update LLVM opt analysis test cases with new
FileCheck patterns. It can either update all of the tests in the file or
a single test function.
Example usage:
$ update_analyze_test_checks.py --opt=../bin/opt test/foo.ll
Workflow:
1. Make a compiler patch that requires updating some number of FileCheck lines
in regression test files.
2. Save the patch and revert it from your local work area.
3. Update the RUN-lines in the affected regression tests to look canonical.
Example: "; RUN: opt < %s -analyze -cost-model -S | FileCheck %s"
4. Refresh the FileCheck lines for either the entire file or select functions by
running this script.
5. Commit the fresh baseline of checks.
6. Apply your patch from step 1 and rebuild your local binaries.
7. Re-run this script on affected regression tests.
8. Check the diffs to ensure the script has done something reasonable.
9. Submit a patch including the regression test diffs for review.
A common pattern is to have the script insert complete checking of every
instruction. Then, edit it down to only check the relevant instructions.
The script is designed to make adding checks to a test case fast; it is *not*
designed to be authoritative about what constitutes a good test!
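As an illustration (the exact text depends on the analysis and the opt
version), an updated cost-model test gains lines such as:
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %add = add i32 %x, %y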
"""
from __future__ import print_function
import argparse
import glob
import itertools
import os # Used to advertise this file's name ("autogenerated_note").
import string
import subprocess
import sys
import tempfile
import re
from UpdateTestChecks import common
ADVERT = '; NOTE: Assertions have been autogenerated by '
def main():
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument('--opt-binary', default='opt',
help='The opt binary used to generate the test case')
parser.add_argument(
'--function', help='The function in the test file to update')
parser.add_argument('tests', nargs='+')
args = common.parse_commandline_args(parser)
script_name = os.path.basename(__file__)
autogenerated_note = (ADVERT + 'utils/' + script_name)
opt_basename = os.path.basename(args.opt_binary)
if (opt_basename != "opt"):
common.error('Unexpected opt name: ' + opt_basename)
sys.exit(1)
test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
for test in test_paths:
with open(test) as f:
input_lines = [l.rstrip() for l in f]
first_line = input_lines[0] if input_lines else ""
if 'autogenerated' in first_line and script_name not in first_line:
common.warn("Skipping test which wasn't autogenerated by " + script_name + ": " + test)
continue
if args.update_only:
if not first_line or 'autogenerated' not in first_line:
common.warn("Skipping test which isn't autogenerated: " + test)
continue
run_lines = common.find_run_lines(test, input_lines)
prefix_list = []
for l in run_lines:
if '|' not in l:
common.warn('Skipping unparseable RUN line: ' + l)
continue
(tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
common.verify_filecheck_prefixes(filecheck_cmd)
if not tool_cmd.startswith(opt_basename + ' '):
common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l))
continue
if not filecheck_cmd.startswith('FileCheck '):
common.warn('Skipping non-FileChecked RUN line: ' + l)
continue
tool_cmd_args = tool_cmd[len(opt_basename):].strip()
tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args))
builder = common.FunctionTestBuilder(
run_list = prefix_list,
flags = type('', (object,), {
'verbose': args.verbose,
'function_signature': False,
'check_attributes': False}),
scrubber_args = [])
for prefixes, opt_args in prefix_list:
common.debug('Extracted opt cmd:', opt_basename, opt_args, file=sys.stderr)
common.debug('Extracted FileCheck prefixes:', str(prefixes), file=sys.stderr)
raw_tool_outputs = common.invoke_tool(args.opt_binary, opt_args, test)
# Split analysis outputs by "Printing analysis " declarations.
for raw_tool_output in re.split(r'Printing analysis ', raw_tool_outputs):
builder.process_run_line(common.ANALYZE_FUNCTION_RE, common.scrub_body,
raw_tool_output, prefixes)
func_dict = builder.finish_and_get_func_dict()
is_in_function = False
is_in_function_start = False
prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
common.debug('Rewriting FileCheck prefixes:', str(prefix_set), file=sys.stderr)
output_lines = []
output_lines.append(autogenerated_note)
for input_line in input_lines:
if is_in_function_start:
if input_line == '':
continue
if input_line.lstrip().startswith(';'):
m = common.CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
common.add_analyze_checks(output_lines, ';', prefix_list, func_dict, func_name)
is_in_function_start = False
if is_in_function:
if common.should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r' ', input_line)
output_lines.append(input_line)
else:
continue
if input_line.strip() == '}':
is_in_function = False
continue
# Discard any previous script advertising.
if input_line.startswith(ADVERT):
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = common.IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if args.function is not None and func_name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
common.debug('Writing %d lines to %s...' % (len(output_lines), test))
with open(test, 'wb') as f:
f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/update_analyze_test_checks.py |
#!/usr/bin/env python
#This is a script to extract given named nodes from a dot file, with
#the associated edges. An edge is kept iff, for an edge x -> y,
#x and y are both nodes specified to be kept.
#Known issues: if a line contains '->' but is not an edge line,
#problems will occur. If node labels do not begin with
#Node this also will not work. Since this is designed to work
#on DSA dot output and not general dot files this is ok.
#If you want to use this on other files, rename the node labels
#to Node[.*] with a script or something. This also relies on
#the length of a node name being 13 characters (as it is in all
#DSA dot output files).
#Note that the name of the node can be any substring of the actual
#name in the dot file. Thus if you specify COLLAPSED
#as a parameter, this script will pull out all COLLAPSED
#nodes in the file.
#Specifying escape characters in the name like \n will not work,
#as Python will turn it into \\n; I'm not really sure how to fix this.
#Currently the script prints the names it is searching for
#to STDOUT, so you can check that they are what you intend.
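#For illustration, given a hypothetical DSA-style fragment
#  Node0x12ab34f [shape=record,label="{COLLAPSED|...}"];
#  Node0x12ab34f -> Node0x56cd78e;
#running "./DSAextract in.dot out.dot COLLAPSED" keeps that node line and
#any edge whose two endpoints were both selected.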
from __future__ import print_function
import re
import sys
if len(sys.argv) < 3:
print('usage is ./DSAextract <dot_file_to_modify> \
<output_file> [list of nodes to extract]')
sys.exit(1)
#open the input file
input = open(sys.argv[1], 'r')
#construct a set of node names
node_name_set = set()
for name in sys.argv[3:]:
node_name_set |= set([name])
#construct a list of compiled regular expressions from the
#node_name_set
regexp_list = []
for name in node_name_set:
regexp_list.append(re.compile(name))
#used to see what kind of line we are on
nodeexp = re.compile('Node')
#used to check to see if the current line is an edge line
arrowexp = re.compile('->')
node_set = set()
#read the file one line at a time
buffer = input.readline()
while buffer != '':
#filter out the unnecessary checks on all the edge lines
if not arrowexp.search(buffer):
#check to see if this is a node we are looking for
for regexp in regexp_list:
#if this name is for the current node, add the dot variable name
#for the node (it will be Node(hex number)) to our set of nodes
if regexp.search(buffer):
node_set |= set([re.split(r'\s+', buffer, 2)[1]])
break
buffer = input.readline()
#test code
#print '\n'
print(node_name_set)
#print node_set
#open the output file
output = open(sys.argv[2], 'w')
#start the second pass over the file
input = open(sys.argv[1], 'r')
buffer = input.readline()
while buffer != '':
#there are three types of lines we are looking for
#1) node lines, 2) edge lines 3) support lines (like page size, etc)
#is this an edge line?
#note that this is not completely robust: if a non-edge line
#for some reason contains '->' it will be misidentified;
#hand-edit the file if this happens
if arrowexp.search(buffer):
#check to make sure that both nodes are in the node list
#if they are print this to output
nodes = arrowexp.split(buffer)
nodes[0] = nodes[0].strip()
nodes[1] = nodes[1].strip()
if nodes[0][:13] in node_set and \
nodes[1][:13] in node_set:
output.write(buffer)
elif nodeexp.search(buffer): #this is a node line
node = re.split(r'\s+', buffer, 2)[1]
if node in node_set:
output.write(buffer)
else: #this is a support line
output.write(buffer)
buffer = input.readline()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/DSAextract.py |
#!/usr/bin/env python
# This script extracts the VPlan digraphs from the vectoriser debug messages
# and saves them in individual dot files (one for each plan). Optionally, and
# providing 'dot' is installed, it can also render the dot into a PNG file.
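# A sketch of typical usage (flags vary across LLVM versions; newer releases
# may need -vplan-print-in-dot-format to get dot output):
#   opt -passes=loop-vectorize -debug-only=loop-vectorize -disable-output \
#       in.ll 2>&1 | extract_vplan.py --png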
from __future__ import print_function
import sys
import re
import argparse
import shutil
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('--png', action='store_true')
args = parser.parse_args()
dot = shutil.which('dot')
if args.png and not dot:
raise RuntimeError("Can't export to PNG without 'dot' in the system")
pattern = re.compile(r"(digraph VPlan {.*?\n})",re.DOTALL)
matches = re.findall(pattern, sys.stdin.read())
for vplan in matches:
m = re.search(r"graph \[.+(VF=.+,UF.+)", vplan)
if not m:
raise ValueError("Can't get the right VPlan name")
name = re.sub('[^a-zA-Z0-9]', '', m.group(1))
if args.png:
filename = 'VPlan' + name + '.png'
print("Exporting " + name + " to PNG via dot: " + filename)
p = subprocess.Popen([dot, '-Tpng', '-o', filename],
encoding='utf-8',
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(input=vplan)
if err:
raise RuntimeError("Error running dot: " + err)
else:
filename = 'VPlan' + name + '.dot'
print("Exporting " + name + " to DOT: " + filename)
with open(filename, 'w') as out:
out.write(vplan)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/extract_vplan.py |
#!/usr/bin/env python
# Given a -print-before-all -print-module-scope log from an opt invocation,
# chunk it into a series of individual IR files, one for each pass invocation.
# If the log ends with an obvious stack trace, try to split off a separate
# "crashinfo.txt" file leaving only the valid input IR in the last chunk.
# Files are written to current working directory.
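# A sketch of typical usage (file names are illustrative):
#   opt -O2 -print-before-all -print-module-scope -disable-output in.ll 2>&1 \
#       | chunk-print-before-all.py
# This writes chunk-0.ll, chunk-1.ll, ... and, if the log ends in a stack
# trace, crashinfo.txt.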
from __future__ import print_function
import sys
basename = "chunk-"
chunk_id = 0
def print_chunk(lines):
global chunk_id
global basename
fname = basename + str(chunk_id) + ".ll"
chunk_id = chunk_id + 1
print("writing chunk " + fname + " (" + str(len(lines)) + " lines)")
with open(fname, "w") as f:
f.writelines(lines)
is_dump = False
cur = []
for line in sys.stdin:
if line.startswith("*** IR Dump Before "):
if len(cur) != 0:
print_chunk(cur)
cur = []
cur.append("; " + line)
elif line.startswith("Stack dump:"):
print_chunk(cur)
cur = []
cur.append(line)
is_dump = True
else:
cur.append(line)
if is_dump:
print("writing crashinfo.txt (" + str(len(cur)) + " lines)")
with open("crashinfo.txt", "w") as f:
f.writelines(cur)
else:
print_chunk(cur)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/chunk-print-before-all.py |
#!/usr/bin/env python
"""Reduces GlobalISel failures.
This script is a utility to reduce tests that GlobalISel
fails to compile.
It runs llc and extracts the error message with a regex, then creates
a custom command that checks for that specific error. Finally, it runs
bugpoint with the custom command.
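Example usage (paths are illustrative):
bugpoint_gisel_reducer.py path/to/llvm-build test/failing.ll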
"""
from __future__ import print_function
import argparse
import re
import subprocess
import sys
import tempfile
import os
def log(msg):
print(msg)
def hr():
log('-' * 50)
def log_err(msg):
print('ERROR: {}'.format(msg), file=sys.stderr)
def check_path(path):
if not os.path.exists(path):
log_err('{} does not exist.'.format(path))
raise RuntimeError('path check failed')
return path
def check_bin(build_dir, bin_name):
file_name = '{}/bin/{}'.format(build_dir, bin_name)
return check_path(file_name)
def run_llc(llc, irfile):
pr = subprocess.Popen([llc,
'-o',
'-',
'-global-isel',
'-pass-remarks-missed=gisel',
irfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = pr.communicate()
# communicate() returns bytes on Python 3; decode before regex matching.
err = err.decode('utf-8')
res = pr.wait()
if res == 0:
return 0
re_err = re.compile(
r'LLVM ERROR: ([a-z\s]+):.*(G_INTRINSIC[_A-Z]* <intrinsic:@[a-zA-Z0-9\.]+>|G_[A-Z_]+)')
match = re_err.match(err)
if not match:
return 0
else:
return [match.group(1), match.group(2)]
def run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp, ir_file):
compileCmd = '-compile-command={} -c {} {}'.format(
os.path.realpath(__file__), llc_bin, tmp)
pr = subprocess.Popen([bugpoint_bin,
'-compile-custom',
compileCmd,
'-opt-command={}'.format(opt_bin),
ir_file])
res = pr.wait()
if res != 0:
log_err("Unable to reduce the test.")
raise RuntimeError('bugpoint failed')
def run_bugpoint_check():
path_to_llc = sys.argv[2]
path_to_err = sys.argv[3]
path_to_ir = sys.argv[4]
with open(path_to_err, 'r') as f:
err = f.read()
res = run_llc(path_to_llc, path_to_ir)
if res == 0:
return 0
log('GlobalISel failed, {}: {}'.format(res[0], res[1]))
if res != err.split(';'):
return 0
else:
return 1
def main():
# Check if this is called by bugpoint.
if len(sys.argv) == 5 and sys.argv[1] == '-c':
sys.exit(run_bugpoint_check())
# Parse arguments.
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('BuildDir', help="Path to LLVM build directory")
parser.add_argument('IRFile', help="Path to the input IR file")
args = parser.parse_args()
# Check if the binaries exist.
build_dir = check_path(args.BuildDir)
ir_file = check_path(args.IRFile)
llc_bin = check_bin(build_dir, 'llc')
opt_bin = check_bin(build_dir, 'opt')
bugpoint_bin = check_bin(build_dir, 'bugpoint')
# Run llc to see if GlobalISel fails.
log('Running llc...')
res = run_llc(llc_bin, ir_file)
if res == 0:
log_err("Expected failure")
raise RuntimeError('expected GlobalISel to fail')
hr()
log('GlobalISel failed, {}: {}.'.format(res[0], res[1]))
tmp = tempfile.NamedTemporaryFile(mode='w')  # text mode so write(str) works on Python 3
log('Writing error to {} for bugpoint.'.format(tmp.name))
tmp.write(';'.join(res))
tmp.flush()
hr()
# Run bugpoint.
log('Running bugpoint...')
run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp.name, ir_file)
hr()
log('Done!')
hr()
output_file = 'bugpoint-reduced-simplified.bc'
log('Run llvm-dis to disassemble the output:')
log('$ {}/bin/llvm-dis -o - {}'.format(build_dir, output_file))
log('Run llc to reproduce the problem:')
log('$ {}/bin/llc -o - -global-isel '
'-pass-remarks-missed=gisel {}'.format(build_dir, output_file))
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/bugpoint_gisel_reducer.py |
#!/usr/bin/env python3
'''
Merge .stats files generated by llvm tools
merge-stats.py takes a list of stats files to merge as arguments
and outputs the result on stdout
Usage:
merge-stats.py $(find ./builddir/ -name "*.stats") > total.stats
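For example (values are illustrative), merging {"a": 2, "b": 1} with
{"a": 3} yields {"a": 5, "b": 1}: values for keys that appear in several
files are added together.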
'''
import json
import sys
result = {}
for arg in range(1, len(sys.argv)):
with open(sys.argv[arg], "r", encoding='utf-8',
errors='ignore') as f:
text = f.read()
try:
data = json.loads(text)
except json.JSONDecodeError:
print('ignored %s: failed to parse' % sys.argv[arg], file=sys.stderr)
continue
for key in data:
if key in result:
result[key] += data[key]
else:
result[key] = data[key]
out = json.dumps(result, indent=2)
print(out)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/merge-stats.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
Runs an executable on a remote host.
This is meant to be used as an executor when running the LLVM and the Libraries
tests on a target.
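Example invocation (host and paths are illustrative):
remote-exec.py --host=user@target --execdir=Output/foo.dir -- ./foo.tmp.exe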
"""
import argparse
import os
import posixpath
import shlex
import subprocess
import sys
import tarfile
import tempfile
import re
def ssh(args, command):
cmd = ['ssh', '-oBatchMode=yes']
if args.extra_ssh_args is not None:
cmd.extend(shlex.split(args.extra_ssh_args))
return cmd + [args.host, command]
def scp(args, src, dst):
cmd = ['scp', '-q', '-oBatchMode=yes']
if args.extra_scp_args is not None:
cmd.extend(shlex.split(args.extra_scp_args))
return cmd + [src, '{}:{}'.format(args.host, dst)]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str, required=True)
parser.add_argument('--execdir', type=str, required=True)
parser.add_argument('--extra-ssh-args', type=str, required=False)
parser.add_argument('--extra-scp-args', type=str, required=False)
parser.add_argument('--codesign_identity', type=str, required=False, default=None)
parser.add_argument('--env', type=str, nargs='*', required=False, default=dict())
# Note: the default value is for backward compatibility with a hack in
# the libcxx test suite.
# If an argument is a file that ends in `.tmp.exe`, assume it is the name
# of an executable generated by a test file. We call these test-executables
# below. This allows us to do custom processing like codesigning test-executables
# and changing their path when running on the remote host. It's also possible
# for there to be no such executable, for example in the case of a .sh.cpp
# test.
parser.add_argument('--exec-pattern', type=str, required=False, default=r'.*\.tmp\.exe',
help='The name regex pattern of the executables generated by \
a test file. Specifying it allows us to do custom \
processing like codesigning test-executables \
and changing their path when running on \
the remote host. It\'s also possible for there \
to be no such executable, for example in \
the case of a .sh.cpp test.')
parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
args = parser.parse_args()
commandLine = args.command
# Create a temporary directory where the test will be run.
# That is effectively the value of %T on the remote host.
tmp = subprocess.check_output(
ssh(args, 'mktemp -d /tmp/llvm.XXXXXXXXXX'),
universal_newlines=True
).strip()
isExecutable = lambda exe: re.match(args.exec_pattern, exe) and os.path.exists(exe)
pathOnRemote = lambda file: posixpath.join(tmp, os.path.basename(file))
try:
# Do any necessary codesigning of test-executables found in the command line.
if args.codesign_identity:
for exe in filter(isExecutable, commandLine):
subprocess.check_call(
['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe],
env={})
# tar up the execution directory (which contains everything that's needed
# to run the test), and copy the tarball over to the remote host.
try:
tmpTar = tempfile.NamedTemporaryFile(suffix='.tar', delete=False)
with tarfile.open(fileobj=tmpTar, mode='w') as tarball:
tarball.add(args.execdir, arcname=os.path.basename(args.execdir))
# Make sure we close the file before we scp it, because accessing
# the temporary file while still open doesn't work on Windows.
tmpTar.close()
remoteTarball = pathOnRemote(tmpTar.name)
subprocess.check_call(scp(args, tmpTar.name, remoteTarball))
finally:
# Make sure we close the file in case an exception happens before
# we've closed it above -- otherwise close() is idempotent.
tmpTar.close()
os.remove(tmpTar.name)
# Untar the dependencies in the temporary directory and remove the tarball.
remoteCommands = [
'tar -xf {} -C {} --strip-components 1'.format(remoteTarball, tmp),
'rm {}'.format(remoteTarball)
]
# Make sure all executables in the remote command line have 'execute'
# permissions on the remote host. The host that compiled the test-executable
# might not have a notion of 'executable' permissions.
for exe in filter(isExecutable, commandLine):
remoteCommands.append('chmod +x {}'.format(pathOnRemote(exe)))
# Execute the command through SSH in the temporary directory, with the
# correct environment. We tweak the command line to run it on the remote
# host by transforming the path of test-executables to their path in the
# temporary directory on the remote host.
for i, x in enumerate(commandLine):
if isExecutable(x):
commandLine[i] = pathOnRemote(x)
remoteCommands.append('cd {}'.format(tmp))
if args.env:
remoteCommands.append('export {}'.format(' '.join(args.env)))
remoteCommands.append(subprocess.list2cmdline(commandLine))
# Finally, SSH to the remote host and execute all the commands.
rc = subprocess.call(ssh(args, ' && '.join(remoteCommands)))
return rc
finally:
# Make sure the temporary directory is removed when we're done.
subprocess.check_call(ssh(args, 'rm -r {}'.format(tmp)))
if __name__ == '__main__':
exit(main())
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/remote-exec.py |
#!/usr/bin/env python
"""A ladder graph creation program.
This is a python program that creates c source code that will generate
CFGs that are ladder graphs. Ladder graphs are generally the worst case
for a lot of dominance related algorithms (Dominance frontiers, etc),
and often generate N^2 or worse behavior.
One good use of this program is to test whether your linear time algorithm is
really behaving linearly.
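For example, "create_ladder_graph.py 4" emits a C function whose CFG is,
sketching only the edges:
rung10 -> rung12, rung21
rung12 -> rung23
rung21 -> rung23
rung23 -> return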
"""
from __future__ import print_function
import argparse
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('rungs', type=int,
help="Number of ladder rungs. Must be a multiple of 2")
args = parser.parse_args()
if (args.rungs % 2) != 0:
print("Rungs must be a multiple of 2")
return
print("int ladder(int *foo, int *bar, int x) {")
rung1 = range(0, args.rungs, 2)
rung2 = range(1, args.rungs, 2)
for i in rung1:
print("rung1%d:" % i)
print("*foo = x++;")
if i != rung1[-1]:
print("if (*bar) goto rung1%d;" % (i+2))
print("else goto rung2%d;" % (i+1))
else:
print("goto rung2%d;" % (i+1))
for i in rung2:
print("rung2%d:" % i)
print("*foo = x++;")
if i != rung2[-1]:
print("goto rung2%d;" % (i+2))
else:
print("return *foo;")
print("}")
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/create_ladder_graph.py |
#!/usr/bin/env python3
#
# ======- check-ninja-deps - build debugging script ----*- python -*--========#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ==------------------------------------------------------------------------==#
"""Script to find missing formal dependencies in a build.ninja file.
Suppose you have a header file that's autogenerated by (for example) Tablegen.
If a C++ compilation step needs to include that header, then it must be
executed after the Tablegen build step that generates the header. So the
dependency graph in build.ninja should have the Tablegen build step as an
ancestor of the C++ one. If it does not, then there's a latent build-failure
bug, because depending on the order that ninja chooses to schedule its build
steps, the C++ build step could run first, and fail because the header it needs
does not exist yet.
But because that kind of bug can easily be latent or intermittent, you might
not notice, if your local test build happens to succeed. What you'd like is a
way to detect problems of this kind reliably, even if they _didn't_ cause a
failure on your first test.
This script tries to do that. It's specific to the 'ninja' build tool, because
ninja has useful auxiliary output modes that produce the necessary data:
- 'ninja -t graph' emits the full DAG of formal dependencies derived from
build.ninja (in Graphviz format)
- 'ninja -t deps' dumps the database of dependencies discovered at build time
by finding out which headers each source file actually included
By cross-checking these two sources of data against each other, you can find
true dependencies shown by 'deps' that are not reflected as formal dependencies
in 'graph', i.e. a generated header that is required by a given source file but
not forced to be built first.
To run it:
- set up a build directory using ninja as the build tool (cmake -G Ninja)
- in that build directory, run ninja to perform an actual build (populating
the dependency database)
- then, in the same build directory, run this script. No arguments are needed
(but -C and -f are accepted, and propagated to ninja for convenience).
Requirements outside core Python: the 'pygraphviz' module, available via pip or
as the 'python3-pygraphviz' package in Debian and Ubuntu.
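A detected problem is reported on stderr in the form (names illustrative):
error: tools/foo.o requires include/Options.inc but has no dependency on it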
"""
import sys
import argparse
import subprocess
import pygraphviz
def toposort(g):
"""Topologically sort a graph.
The input g is a pygraphviz graph object representing a DAG. The function
yields the vertices of g in an arbitrary order consistent with the edges,
so that for any edge v->w, v is output before w."""
# Count the number of immediate predecessors *not yet output* for each
# vertex. Initially this is simply their in-degrees.
ideg = {v: g.in_degree(v) for v in g.nodes_iter()}
# Set of vertices which can be output next, which is true if they have no
# immediate predecessor that has not already been output.
ready = {v for v, d in ideg.items() if d == 0}
# Keep outputting vertices while we have any to output.
while len(ready) > 0:
v = next(iter(ready))
yield v
ready.remove(v)
# Having output v, find each immediate successor w, and decrement its
# 'ideg' value by 1, to indicate that one more of its predecessors has
# now been output.
for w in g.out_neighbors(v):
ideg[w] -= 1
if ideg[w] == 0:
# If that counter reaches zero, w is ready to output.
ready.add(w)
def ancestors(g, translate = lambda x: x):
"""Form the set of ancestors for each vertex of a graph.
The input g is a pygraphviz graph object representing a DAG. The function
yields a sequence of pairs (vertex, set of proper ancestors).
The vertex names are all mapped through 'translate' before output. This
allows us to produce output referring to the label rather than the
identifier of every vertex.
"""
# Store the set of (translated) ancestors for each vertex so far. a[v]
# includes (the translation of) v itself.
a = {}
for v in toposort(g):
vm = translate(v)
# Make up a[v], based on a[predecessors of v].
a[v] = {vm} # include v itself
for w in g.in_neighbors(v):
a[v].update(a[w])
# Remove v itself from the set before yielding it, so that the caller
# doesn't get the trivial dependency of v on itself.
yield vm, a[v].difference({vm})
def main():
parser = argparse.ArgumentParser(
description='Find missing formal dependencies on generated include '
'files in a build.ninja file.')
parser.add_argument("-C", "--build-dir",
help="Build directory (default cwd)")
parser.add_argument("-f", "--build-file",
help="Build directory (default build.ninja)")
args = parser.parse_args()
errs = 0
ninja_prefix = ["ninja"]
if args.build_dir is not None:
ninja_prefix.extend(["-C", args.build_dir])
if args.build_file is not None:
ninja_prefix.extend(["-f", args.build_file])
# Get the formal dependency graph and decode it using pygraphviz.
g = pygraphviz.AGraph(subprocess.check_output(
ninja_prefix + ["-t", "graph"]).decode("UTF-8"))
# Helper function to ask for the label of a vertex, which is where ninja's
# Graphviz output keeps the actual file name of the target.
label = lambda v: g.get_node(v).attr["label"]
# Start by making a list of build targets, i.e. generated files. These are
# just any graph vertex with at least one predecessor.
targets = set(label(v) for v in g.nodes_iter() if g.in_degree(v) > 0)
# Find the set of ancestors of each graph vertex. We pass in 'label' as a
# translation function, so that this gives us the set of ancestor _files_
# for a given _file_ rather than arbitrary numeric vertex ids.
deps = dict(ancestors(g, label))
# Fetch the cached dependency data and check it against our formal ancestry
# data.
currtarget = None
for line in (subprocess.check_output(ninja_prefix + ["-t", "deps"])
.decode("UTF-8").splitlines()):
# ninja -t deps output consists of stanzas of the following form,
# separated by a blank line:
#
# target: [other information we don't need]
# some_file.cpp
# some_header.h
# other_header.h
#
# We parse this ad-hoc by detecting the four leading spaces in a
# source-file line, and the colon in a target line. 'currtarget' stores
# the last target name we saw.
if line.startswith(" "):
dep = line[4:]
assert currtarget is not None, "Source file appeared before target"
# We're only interested in this dependency if it's a *generated*
# file, i.e. it is in our set of targets. Also, we must check that
# currtarget is actually a target we know about: the dependency
# cache is not cleared when build.ninja changes, so it can contain
# stale data from targets that existed only in past builds in the
# same directory.
if (dep in targets and currtarget in deps and
dep not in deps[currtarget]):
print("error:", currtarget, "requires", dep,
"but has no dependency on it", file=sys.stderr)
errs += 1
elif ":" in line:
currtarget = line.split(":", 1)[0]
if errs:
sys.exit("{:d} errors found".format(errs))
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/check_ninja_deps.py |
#!/usr/bin/env python3
"""A test case update script.
This script is a utility to update LLVM 'llc' based test cases with new
FileCheck patterns. It can either update all of the tests in the file or
a single test function.
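Example usage (the paths are illustrative):
$ update_llc_test_checks.py --llc-binary=../bin/llc test/CodeGen/X86/foo.ll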
"""
from __future__ import print_function
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
from UpdateTestChecks import asm, common
# llc is the only llc-like tool in the LLVM tree, but downstream forks can add
# additional ones here if they have them.
LLC_LIKE_TOOLS = ('llc',)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--llc-binary', default=None,
help='The "llc" binary to use to generate the test case')
parser.add_argument(
'--function', help='The function in the test file to update')
parser.add_argument(
'--extra_scrub', action='store_true',
help='Always use additional regex to further reduce diffs between various subtargets')
parser.add_argument(
'--x86_scrub_sp', action='store_true', default=True,
help='Use regex for x86 sp matching to reduce diffs between various subtargets')
parser.add_argument(
'--no_x86_scrub_sp', action='store_false', dest='x86_scrub_sp')
parser.add_argument(
'--x86_scrub_rip', action='store_true', default=True,
help='Use more regex for x86 rip matching to reduce diffs between various subtargets')
parser.add_argument(
'--no_x86_scrub_rip', action='store_false', dest='x86_scrub_rip')
parser.add_argument(
'--no_x86_scrub_mem_shuffle', action='store_true', default=False,
help='Reduce scrubbing shuffles with memory operands')
parser.add_argument('tests', nargs='+')
initial_args = common.parse_commandline_args(parser)
script_name = os.path.basename(__file__)
for ti in common.itertests(initial_args.tests, parser,
script_name='utils/' + script_name):
triple_in_ir = None
for l in ti.input_lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
triple_in_ir = m.groups()[0]
break
run_list = []
for l in ti.run_lines:
if '|' not in l:
common.warn('Skipping unparseable RUN line: ' + l)
continue
commands = [cmd.strip() for cmd in l.split('|', 1)]
llc_cmd = commands[0]
llc_tool = llc_cmd.split(' ')[0]
triple_in_cmd = None
m = common.TRIPLE_ARG_RE.search(llc_cmd)
if m:
triple_in_cmd = m.groups()[0]
march_in_cmd = None
m = common.MARCH_ARG_RE.search(llc_cmd)
if m:
march_in_cmd = m.groups()[0]
filecheck_cmd = ''
if len(commands) > 1:
filecheck_cmd = commands[1]
common.verify_filecheck_prefixes(filecheck_cmd)
if llc_tool not in LLC_LIKE_TOOLS:
common.warn('Skipping non-llc RUN line: ' + l)
continue
if not filecheck_cmd.startswith('FileCheck '):
common.warn('Skipping non-FileChecked RUN line: ' + l)
continue
llc_cmd_args = llc_cmd[len(llc_tool):].strip()
llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip()
if ti.path.endswith('.mir'):
llc_cmd_args += ' -x mir'
check_prefixes = [item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
run_list.append((check_prefixes, llc_cmd_args, triple_in_cmd, march_in_cmd))
if ti.path.endswith('.mir'):
check_indent = ' '
else:
check_indent = ''
builder = common.FunctionTestBuilder(
run_list=run_list,
flags=type('', (object,), {
'verbose': ti.args.verbose,
'function_signature': False,
'check_attributes': False}),
scrubber_args=[ti.args])
for prefixes, llc_args, triple_in_cmd, march_in_cmd in run_list:
common.debug('Extracted LLC cmd:', llc_tool, llc_args)
common.debug('Extracted FileCheck prefixes:', str(prefixes))
raw_tool_output = common.invoke_tool(ti.args.llc_binary or llc_tool,
llc_args, ti.path)
triple = triple_in_cmd or triple_in_ir
if not triple:
triple = asm.get_triple_from_march(march_in_cmd)
scrubber, function_re = asm.get_run_handler(triple)
builder.process_run_line(function_re, scrubber, raw_tool_output, prefixes)
func_dict = builder.finish_and_get_func_dict()
is_in_function = False
is_in_function_start = False
func_name = None
prefix_set = set([prefix for p in run_list for prefix in p[0]])
common.debug('Rewriting FileCheck prefixes:', str(prefix_set))
output_lines = []
include_generated_funcs = common.find_arg_in_test(ti,
lambda args: ti.args.include_generated_funcs,
'--include-generated-funcs',
True)
if include_generated_funcs:
# Generate the appropriate checks for each function. We need to emit
# these in the order according to the generated output so that CHECK-LABEL
# works properly. func_order provides that.
# We can't predict where various passes might insert functions so we can't
# be sure the input function order is maintained. Therefore, first spit
# out all the source lines.
common.dump_input_lines(output_lines, ti, prefix_set, ';')
# Now generate all the checks.
common.add_checks_at_end(output_lines, run_list, builder.func_order(),
check_indent + ';',
lambda my_output_lines, prefixes, func:
asm.add_asm_checks(my_output_lines,
check_indent + ';',
prefixes, func_dict, func))
else:
for input_info in ti.iterlines(output_lines):
input_line = input_info.line
args = input_info.args
if is_in_function_start:
if input_line == '':
continue
if input_line.lstrip().startswith(';'):
m = common.CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
asm.add_asm_checks(output_lines, check_indent + ';', run_list, func_dict, func_name)
is_in_function_start = False
if is_in_function:
if common.should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
output_lines.append(input_line)
else:
continue
if input_line.strip() == '}':
is_in_function = False
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = common.IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if args.function is not None and func_name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))
with open(ti.path, 'wb') as f:
f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/update_llc_test_checks.py |
#!/usr/bin/env python
# This creates a CSV file from the debug output of the subtarget emitter:
# llvm-tblgen --gen-subtarget --debug-only=subtarget-emitter
# With thanks to Dave Estes for mentioning the idea at 2014 LLVM Developers' Meeting
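# A sketch of typical usage (target and file names are illustrative):
#   llvm-tblgen --gen-subtarget --debug-only=subtarget-emitter \
#       -I llvm/include llvm/lib/Target/AArch64/AArch64.td 2> sched.log
#   schedcover.py sched.log > schedcover.csv
#   schedcover.py sched.log cyclone > cyclone.csv  # optional model filter regex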
import os
import sys
import re
import operator
table = {}
models = set()
filt = None
def add(instr, model, resource=None):
global table, models
entry = table.setdefault(instr, dict())
entry[model] = resource
models.add(model)
def filter_model(m):
global filt
if m and filt:
return filt.search(m) != None
else:
return True
def display():
global table, models
# remove default and itinerary so we can control their sort order to make
# them first
models.discard("default")
models.discard("itinerary")
ordered_table = sorted(table.items(), key=operator.itemgetter(0))
ordered_models = ["itinerary", "default"]
ordered_models.extend(sorted(models))
ordered_models = [m for m in ordered_models if filter_model(m)]
# print header
sys.stdout.write("instruction")
for model in ordered_models:
sys.stdout.write(", {}".format(model))
sys.stdout.write(os.linesep)
for (instr, mapping) in ordered_table:
sys.stdout.write(instr)
for model in ordered_models:
if model in mapping and mapping[model] is not None:
sys.stdout.write(", {}".format(mapping[model]))
else:
sys.stdout.write(", ")
sys.stdout.write(os.linesep)
def machineModelCover(path):
# The interesting bits
re_sched_default = re.compile("SchedRW machine model for ([^ ]*) (.*)\n")
re_sched_no_default = re.compile("No machine model for ([^ ]*)\n")
re_sched_spec = re.compile("InstRW on ([^ ]*) for ([^ ]*) (.*)\n")
re_sched_no_spec = re.compile("No machine model for ([^ ]*) on processor (.*)\n")
re_sched_itin = re.compile("Itinerary for ([^ ]*): ([^ ]*)\n")
# scan the file
with open(path, 'r') as f:
for line in f.readlines():
match = re_sched_default.match(line)
if match: add(match.group(1), "default", match.group(2))
match = re_sched_no_default.match(line)
if match: add(match.group(1), "default")
match = re_sched_spec.match(line)
if match: add(match.group(2), match.group(1), match.group(3))
match = re_sched_no_spec.match(line)
if match: add(match.group(1), match.group(2))
match = re_sched_itin.match(line)
if match: add(match.group(1), "itinerary", match.group(2))
display()
if len(sys.argv) > 2:
filt = re.compile(sys.argv[2], re.IGNORECASE)
machineModelCover(sys.argv[1])
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/schedcover.py |
#!/usr/bin/env python3
"""Updates FileCheck checks in MIR tests.
This script is a utility to update MIR based tests with new FileCheck
patterns.
The checks added by this script will cover the entire body of each
function it handles. Virtual registers used are given names via
FileCheck patterns, so if you do want to check a subset of the body it
should be straightforward to trim out the irrelevant parts. None of
the YAML metadata will be checked, other than function names.
If there are multiple llc commands in a test, the full set of checks
will be repeated for each different check pattern. Checks for patterns
that are common between different commands will be left as-is by
default, or removed if the --remove-common-prefixes flag is provided.
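As an illustration (opcodes and register banks depend on the target), the
generated checks look roughly like:
; CHECK-LABEL: name: add
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]]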
"""
from __future__ import print_function
import argparse
import collections
import glob
import os
import re
import subprocess
import sys
from UpdateTestChecks import common
MIR_FUNC_NAME_RE = re.compile(r' *name: *(?P<func>[A-Za-z0-9_.-]+)')
MIR_BODY_BEGIN_RE = re.compile(r' *body: *\|')
MIR_BASIC_BLOCK_RE = re.compile(r' *bb\.[0-9]+.*:$')
VREG_RE = re.compile(r'(%[0-9]+)(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?')
MI_FLAGS_STR = (
r'(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn '
r'|reassoc |nuw |nsw |exact |fpexcept )*')
VREG_DEF_RE = re.compile(
r'^ *(?P<vregs>{0}(?:, {0})*) = '
r'{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(VREG_RE.pattern, MI_FLAGS_STR))
MIR_PREFIX_DATA_RE = re.compile(r'^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)')
IR_FUNC_NAME_RE = re.compile(
r'^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[A-Za-z0-9_.]+)\s*\(')
IR_PREFIX_DATA_RE = re.compile(r'^ *(;|$)')
MIR_FUNC_RE = re.compile(
r'^---$'
r'\n'
r'^ *name: *(?P<func>[A-Za-z0-9_.-]+)$'
r'.*?'
r'^ *body: *\|\n'
r'(?P<body>.*?)\n'
r'^\.\.\.$',
flags=(re.M | re.S))
class LLC:
def __init__(self, bin):
self.bin = bin
def __call__(self, args, ir):
if ir.endswith('.mir'):
args = '{} -x mir'.format(args)
with open(ir) as ir_file:
stdout = subprocess.check_output('{} {}'.format(self.bin, args),
shell=True, stdin=ir_file)
if sys.version_info[0] > 2:
stdout = stdout.decode()
# Fix line endings to unix CR style.
stdout = stdout.replace('\r\n', '\n')
return stdout
class Run:
def __init__(self, prefixes, cmd_args, triple):
self.prefixes = prefixes
self.cmd_args = cmd_args
self.triple = triple
def __getitem__(self, index):
return [self.prefixes, self.cmd_args, self.triple][index]
def log(msg, verbose=True):
if verbose:
print(msg, file=sys.stderr)
def find_triple_in_ir(lines, verbose=False):
for l in lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
return m.group(1)
return None
def build_run_list(test, run_lines, verbose=False):
run_list = []
all_prefixes = []
for l in run_lines:
if '|' not in l:
common.warn('Skipping unparseable RUN line: ' + l)
continue
commands = [cmd.strip() for cmd in l.split('|', 1)]
llc_cmd = commands[0]
filecheck_cmd = commands[1] if len(commands) > 1 else ''
common.verify_filecheck_prefixes(filecheck_cmd)
if not llc_cmd.startswith('llc '):
common.warn('Skipping non-llc RUN line: {}'.format(l), test_file=test)
continue
if not filecheck_cmd.startswith('FileCheck '):
common.warn('Skipping non-FileChecked RUN line: {}'.format(l),
test_file=test)
continue
triple = None
m = common.TRIPLE_ARG_RE.search(llc_cmd)
if m:
triple = m.group(1)
# If we find -march but not -mtriple, use that.
m = common.MARCH_ARG_RE.search(llc_cmd)
if m and not triple:
triple = '{}--'.format(m.group(1))
cmd_args = llc_cmd[len('llc'):].strip()
cmd_args = cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [
item
for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
all_prefixes += check_prefixes
run_list.append(Run(check_prefixes, cmd_args, triple))
# Remove any common prefixes. We'll just leave those entirely alone.
common_prefixes = set([prefix for prefix in all_prefixes
if all_prefixes.count(prefix) > 1])
for run in run_list:
run.prefixes = [p for p in run.prefixes if p not in common_prefixes]
return run_list, common_prefixes
def find_functions_with_one_bb(lines, verbose=False):
result = []
cur_func = None
bbs = 0
for line in lines:
m = MIR_FUNC_NAME_RE.match(line)
if m:
if bbs == 1:
result.append(cur_func)
cur_func = m.group('func')
bbs = 0
m = MIR_BASIC_BLOCK_RE.match(line)
if m:
bbs += 1
if bbs == 1:
result.append(cur_func)
return result
def build_function_body_dictionary(test, raw_tool_output, triple, prefixes,
func_dict, verbose):
for m in MIR_FUNC_RE.finditer(raw_tool_output):
func = m.group('func')
body = m.group('body')
if verbose:
log('Processing function: {}'.format(func))
for l in body.splitlines():
log(' {}'.format(l))
for prefix in prefixes:
if func in func_dict[prefix] and func_dict[prefix][func] != body:
common.warn('Found conflicting asm for prefix: {}'.format(prefix),
test_file=test)
func_dict[prefix][func] = body
def add_checks_for_function(test, output_lines, run_list, func_dict, func_name,
single_bb, verbose=False):
printed_prefixes = set()
for run in run_list:
for prefix in run.prefixes:
if prefix in printed_prefixes:
continue
if not func_dict[prefix][func_name]:
continue
# if printed_prefixes:
# # Add some space between different check prefixes.
# output_lines.append('')
printed_prefixes.add(prefix)
log('Adding {} lines for {}'.format(prefix, func_name), verbose)
add_check_lines(test, output_lines, prefix, func_name, single_bb,
func_dict[prefix][func_name].splitlines())
break
return output_lines
def add_check_lines(test, output_lines, prefix, func_name, single_bb,
func_body):
if single_bb:
# Don't bother checking the basic block label for a single BB
func_body.pop(0)
if not func_body:
common.warn('Function has no instructions to check: {}'.format(func_name),
test_file=test)
return
first_line = func_body[0]
indent = len(first_line) - len(first_line.lstrip(' '))
# A check comment, indented the appropriate amount
check = '{:>{}}; {}'.format('', indent, prefix)
output_lines.append('{}-LABEL: name: {}'.format(check, func_name))
vreg_map = {}
for func_line in func_body:
if not func_line.strip():
continue
m = VREG_DEF_RE.match(func_line)
if m:
for vreg in VREG_RE.finditer(m.group('vregs')):
name = mangle_vreg(m.group('opcode'), vreg_map.values())
vreg_map[vreg.group(1)] = name
func_line = func_line.replace(
vreg.group(1), '[[{}:%[0-9]+]]'.format(name), 1)
for number, name in vreg_map.items():
func_line = re.sub(r'{}\b'.format(number), '[[{}]]'.format(name),
func_line)
check_line = '{}: {}'.format(check, func_line[indent:]).rstrip()
output_lines.append(check_line)
def mangle_vreg(opcode, current_names):
base = opcode
# Simplify some common prefixes and suffixes
if opcode.startswith('G_'):
base = base[len('G_'):]
if opcode.endswith('_PSEUDO'):
base = base[:-len('_PSEUDO')]  # strip the suffix, not truncate to its length
# Shorten some common opcodes with long-ish names
base = dict(IMPLICIT_DEF='DEF',
GLOBAL_VALUE='GV',
CONSTANT='C',
FCONSTANT='C',
MERGE_VALUES='MV',
UNMERGE_VALUES='UV',
INTRINSIC='INT',
INTRINSIC_W_SIDE_EFFECTS='INT',
INSERT_VECTOR_ELT='IVEC',
EXTRACT_VECTOR_ELT='EVEC',
SHUFFLE_VECTOR='SHUF').get(base, base)
# Avoid ambiguity when opcodes end in numbers
if len(base.rstrip('0123456789')) < len(base):
base += '_'
i = 0
for name in current_names:
if name.rstrip('0123456789') == base:
i += 1
if i:
return '{}{}'.format(base, i)
return base
def should_add_line_to_output(input_line, prefix_set):
# Skip any check lines that we're handling.
m = common.CHECK_RE.match(input_line)
if m and m.group(1) in prefix_set:
return False
return True
def update_test_file(args, test):
with open(test) as fd:
input_lines = [l.rstrip() for l in fd]
script_name = os.path.basename(__file__)
first_line = input_lines[0] if input_lines else ""
if 'autogenerated' in first_line and script_name not in first_line:
common.warn("Skipping test which wasn't autogenerated by " +
script_name + ": " + test)
return
if args.update_only:
if not first_line or 'autogenerated' not in first_line:
common.warn("Skipping test which isn't autogenerated: " + test)
return
triple_in_ir = find_triple_in_ir(input_lines, args.verbose)
run_lines = common.find_run_lines(test, input_lines)
run_list, common_prefixes = build_run_list(test, run_lines, args.verbose)
simple_functions = find_functions_with_one_bb(input_lines, args.verbose)
func_dict = {}
for run in run_list:
for prefix in run.prefixes:
func_dict.update({prefix: dict()})
for prefixes, llc_args, triple_in_cmd in run_list:
log('Extracted LLC cmd: llc {}'.format(llc_args), args.verbose)
log('Extracted FileCheck prefixes: {}'.format(prefixes), args.verbose)
raw_tool_output = args.llc(llc_args, test)
if not triple_in_cmd and not triple_in_ir:
common.warn('No triple found: skipping file', test_file=test)
return
build_function_body_dictionary(test, raw_tool_output,
triple_in_cmd or triple_in_ir,
prefixes, func_dict, args.verbose)
state = 'toplevel'
func_name = None
prefix_set = set([prefix for run in run_list for prefix in run.prefixes])
log('Rewriting FileCheck prefixes: {}'.format(prefix_set), args.verbose)
if args.remove_common_prefixes:
prefix_set.update(common_prefixes)
elif common_prefixes:
common.warn('Ignoring common prefixes: {}'.format(common_prefixes),
test_file=test)
comment_char = '#' if test.endswith('.mir') else ';'
autogenerated_note = ('{} NOTE: Assertions have been autogenerated by '
'utils/{}'.format(comment_char, script_name))
output_lines = []
output_lines.append(autogenerated_note)
for input_line in input_lines:
if input_line == autogenerated_note:
continue
if state == 'toplevel':
m = IR_FUNC_NAME_RE.match(input_line)
if m:
state = 'ir function prefix'
func_name = m.group('func')
if input_line.rstrip('| \r\n') == '---':
state = 'document'
output_lines.append(input_line)
elif state == 'document':
m = MIR_FUNC_NAME_RE.match(input_line)
if m:
state = 'mir function metadata'
func_name = m.group('func')
if input_line.strip() == '...':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'mir function metadata':
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
m = MIR_BODY_BEGIN_RE.match(input_line)
if m:
if func_name in simple_functions:
# If there's only one block, put the checks inside it
state = 'mir function prefix'
continue
state = 'mir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=False,
verbose=args.verbose)
elif state == 'mir function prefix':
m = MIR_PREFIX_DATA_RE.match(input_line)
if not m:
state = 'mir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=True,
verbose=args.verbose)
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'mir function body':
if input_line.strip() == '...':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'ir function prefix':
m = IR_PREFIX_DATA_RE.match(input_line)
if not m:
state = 'ir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=False,
verbose=args.verbose)
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'ir function body':
if input_line.strip() == '}':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
log('Writing {} lines to {}...'.format(len(output_lines), test), args.verbose)
with open(test, 'wb') as fd:
fd.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--llc-binary', dest='llc', default='llc', type=LLC,
help='The "llc" binary to generate the test case with')
parser.add_argument('--remove-common-prefixes', action='store_true',
help='Remove existing check lines whose prefixes are '
'shared between multiple commands')
parser.add_argument('tests', nargs='+')
args = common.parse_commandline_args(parser)
test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
for test in test_paths:
try:
update_test_file(args, test)
except Exception:
common.warn('Error processing file', test_file=test)
raise
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/update_mir_test_checks.py |
#!/usr/bin/env python
#
# Given a previous good compile narrow down miscompiles.
# Expects two directories named "before" and "after" each containing a set of
# assembly or object files where the "after" version is assumed to be broken.
# You also have to provide a script called "link_test". It is called with a
# list of files which should be linked together and the result tested. "link_test"
# should return with exit code 0 if the linking and testing succeeded.
#
# abtest.py operates by taking all files from the "before" directory and
# in each step replacing one of them with a file from the "after" directory.
#
# Additionally you can perform the same steps with a single .s file. In this
# mode functions are identified by " -- Begin function FunctionName" and
# " -- End function" markers. The abtest.py then takes all
# function from the file in the "before" directory and replaces one function
# with the corresponding function from the "bad" file in each step.
#
# Example usage to identify miscompiled files:
# 1. Create a link_test script, make it executable. Simple Example:
# clang "$@" -o /tmp/test && /tmp/test || echo "PROBLEM"
# 2. Run the script to figure out which files are miscompiled:
# > ./abtest.py
# somefile.s: ok
# someotherfile.s: skipped: same content
# anotherfile.s: failed: './link_test' exitcode != 0
# ...
# Example usage to identify miscompiled functions inside a file:
# 3. Run the tests on a single file (assuming before/file.s and
# after/file.s exist)
# > ./abtest.py file.s
# funcname1 [0/XX]: ok
# funcname2 [1/XX]: ok
# funcname3 [2/XX]: skipped: same content
# funcname4 [3/XX]: failed: './link_test' exitcode != 0
# ...
from fnmatch import filter
from sys import stderr
import argparse
import filecmp
import os
import subprocess
import sys
LINKTEST = "./link_test"
ESCAPE = "\033[%sm"
BOLD = ESCAPE % "1"
RED = ESCAPE % "31"
NORMAL = ESCAPE % "0"
FAILED = RED + "failed" + NORMAL
def find(dir, file_filter=None):
files = [
walkdir[0]+"/"+file
for walkdir in os.walk(dir)
for file in walkdir[2]
]
if file_filter is not None:
files = filter(files, file_filter)
return sorted(files)
def error(message):
stderr.write("Error: %s\n" % (message,))
def warn(message):
stderr.write("Warning: %s\n" % (message,))
def info(message):
stderr.write("Info: %s\n" % (message,))
def announce_test(name):
stderr.write("%s%s%s: " % (BOLD, name, NORMAL))
stderr.flush()
def announce_result(result):
stderr.write(result)
stderr.write("\n")
stderr.flush()
def format_namelist(l):
result = ", ".join(l[0:3])
if len(l) > 3:
result += "... (%d total)" % len(l)
return result
def check_sanity(choices, perform_test):
announce_test("sanity check A")
all_a = {name: a_b[0] for name, a_b in choices}
res_a = perform_test(all_a)
if res_a is not True:
error("Picking all choices from A failed to pass the test")
sys.exit(1)
announce_test("sanity check B (expecting failure)")
all_b = {name: a_b[1] for name, a_b in choices}
res_b = perform_test(all_b)
if res_b is not False:
error("Picking all choices from B did unexpectedly pass the test")
sys.exit(1)
def check_sequentially(choices, perform_test):
known_good = set()
all_a = {name: a_b[0] for name, a_b in choices}
n = 1
for name, a_b in sorted(choices):
picks = dict(all_a)
picks[name] = a_b[1]
announce_test("checking %s [%d/%d]" % (name, n, len(choices)))
n += 1
res = perform_test(picks)
if res is True:
known_good.add(name)
return known_good
def check_bisect(choices, perform_test):
known_good = set()
if len(choices) == 0:
return known_good
choice_map = dict(choices)
all_a = {name: a_b[0] for name, a_b in choices}
def test_partition(partition, upcoming_partition):
# Compute the maximum number of checks we have to do in the worst case.
max_remaining_steps = len(partition) * 2 - 1
if upcoming_partition is not None:
max_remaining_steps += len(upcoming_partition) * 2 - 1
for x in partitions_to_split:
max_remaining_steps += (len(x) - 1) * 2
picks = dict(all_a)
for x in partition:
picks[x] = choice_map[x][1]
announce_test("checking %s [<=%d remaining]" %
(format_namelist(partition), max_remaining_steps))
res = perform_test(picks)
if res is True:
known_good.update(partition)
elif len(partition) > 1:
partitions_to_split.insert(0, partition)
# TODO:
# - We could optimize based on the knowledge that when splitting a failed
# partition into two and one side checks out okay then we can deduce that
# the other partition must be a failure.
all_choice_names = [name for name, _ in choices]
partitions_to_split = [all_choice_names]
while len(partitions_to_split) > 0:
partition = partitions_to_split.pop()
middle = len(partition) // 2
left = partition[0:middle]
right = partition[middle:]
if len(left) > 0:
test_partition(left, right)
assert len(right) > 0
test_partition(right, None)
return known_good
def extract_functions(file):
functions = []
in_function = None
for line in open(file):
marker = line.find(" -- Begin function ")
if marker != -1:
if in_function is not None:
warn("Missing end of function %s" % (in_function,))
funcname = line[marker + 19:-1]
in_function = funcname
text = line
continue
marker = line.find(" -- End function")
if marker != -1:
text += line
functions.append((in_function, text))
in_function = None
continue
if in_function is not None:
text += line
return functions
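# Copy 'source' to 'dest', substituting the text of every function that has
# an entry in 'replacements'.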
def replace_functions(source, dest, replacements):
out = open(dest, "w")
skip = False
in_function = None
for line in open(source):
marker = line.find(" -- Begin function ")
if marker != -1:
if in_function is not None:
warn("Missing end of function %s" % (in_function,))
funcname = line[marker + 19:-1]
in_function = funcname
replacement = replacements.get(in_function)
if replacement is not None:
out.write(replacement)
skip = True
else:
marker = line.find(" -- End function")
if marker != -1:
in_function = None
if skip:
skip = False
continue
if not skip:
out.write(line)
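# Run the user-provided ./link_test script on the chosen files; its exit
# status decides pass (0) or fail (anything else).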
def testrun(files):
linkline = "%s %s" % (LINKTEST, " ".join(files),)
res = subprocess.call(linkline, shell=True)
if res != 0:
announce_result(FAILED + ": '%s' exitcode != 0" % LINKTEST)
return False
else:
announce_result("ok")
return True
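# Whole-file mode: build one (good, bad) choice per file name whose contents
# differ between the two directories.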
def prepare_files(gooddir, baddir):
files_a = find(gooddir, "*")
files_b = find(baddir, "*")
basenames_a = set(map(os.path.basename, files_a))
basenames_b = set(map(os.path.basename, files_b))
for name in files_b:
basename = os.path.basename(name)
if basename not in basenames_a:
warn("There is no corresponding file to '%s' in %s" %
(name, gooddir))
choices = []
skipped = []
for name in files_a:
basename = os.path.basename(name)
if basename not in basenames_b:
warn("There is no corresponding file to '%s' in %s" %
(name, baddir))
continue
file_a = gooddir + "/" + basename
file_b = baddir + "/" + basename
if filecmp.cmp(file_a, file_b):
skipped.append(basename)
continue
choice = (basename, (file_a, file_b))
choices.append(choice)
if len(skipped) > 0:
info("Skipped (same content): %s" % format_namelist(skipped))
def perform_test(picks):
files = []
# Note that we iterate over files_a so we don't change the order
# (the order of `picks` need not match the original file order)
for x in files_a:
basename = os.path.basename(x)
picked = picks.get(basename)
if picked is None:
assert basename in skipped
files.append(x)
else:
files.append(picked)
return testrun(files)
return perform_test, choices
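# Per-function mode: build one (good, bad) choice per function that differs
# between goodfile and badfile; the picked function bodies are spliced into a
# temporary combined file for every test run.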
def prepare_functions(to_check, gooddir, goodfile, badfile):
files_good = find(gooddir, "*")
functions_a = extract_functions(goodfile)
functions_a_map = dict(functions_a)
functions_b_map = dict(extract_functions(badfile))
for name in functions_b_map.keys():
if name not in functions_a_map:
warn("Function '%s' missing from good file" % name)
choices = []
skipped = []
for name, candidate_a in functions_a:
candidate_b = functions_b_map.get(name)
if candidate_b is None:
warn("Function '%s' missing from bad file" % name)
continue
if candidate_a == candidate_b:
skipped.append(name)
continue
choice = name, (candidate_a, candidate_b)
choices.append(choice)
if len(skipped) > 0:
info("Skipped (same content): %s" % format_namelist(skipped))
combined_file = '/tmp/combined2.s'
files = []
found_good_file = False
for c in files_good:
if os.path.basename(c) == to_check:
found_good_file = True
files.append(combined_file)
continue
files.append(c)
assert found_good_file
def perform_test(picks):
for name, x in picks.items():
assert x == functions_a_map[name] or x == functions_b_map[name]
replace_functions(goodfile, combined_file, picks)
return testrun(files)
return perform_test, choices
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--a', dest='dir_a', default='before')
parser.add_argument('--b', dest='dir_b', default='after')
parser.add_argument('--insane', help='Skip sanity check',
action='store_true')
parser.add_argument('--seq',
help='Check sequentially instead of bisection',
action='store_true')
parser.add_argument('file', metavar='file', nargs='?')
config = parser.parse_args()
gooddir = config.dir_a
baddir = config.dir_b
# Preparation phase: Creates a dictionary mapping names to a list of two
# choices each. The bisection algorithm will pick one choice for each name
# and then run the perform_test function on it.
if config.file is not None:
goodfile = gooddir + "/" + config.file
badfile = baddir + "/" + config.file
perform_test, choices = prepare_functions(config.file, gooddir,
goodfile, badfile)
else:
perform_test, choices = prepare_files(gooddir, baddir)
info("%d bisection choices" % len(choices))
# "Checking whether build environment is sane ..."
if not config.insane:
if not os.access(LINKTEST, os.X_OK):
error("Expect '%s' to be present and executable" % (LINKTEST,))
exit(1)
check_sanity(choices, perform_test)
if config.seq:
known_good = check_sequentially(choices, perform_test)
else:
known_good = check_bisect(choices, perform_test)
stderr.write("")
if len(known_good) != len(choices):
stderr.write("== Failing ==\n")
for name, _ in choices:
if name not in known_good:
stderr.write("%s\n" % name)
else:
# This shouldn't happen when the sanity check works...
# Maybe link_test isn't deterministic?
stderr.write("Could not identify failing parts?!?")
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/abtest.py |
#!/usr/bin/env python
'''Prepare a code coverage artifact.
- Collate raw profiles into one indexed profile.
- Generate html reports for the given binaries.
Caution: The positional arguments to this script must be specified before any
optional arguments, such as --restrict.
'''
from __future__ import print_function
import argparse
import glob
import os
import subprocess
import sys
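# Merge all *.profraw files in profile_data_dir into one indexed
# Coverage.profdata via 'llvm-profdata merge' and return its path.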
def merge_raw_profiles(host_llvm_profdata, profile_data_dir, preserve_profiles):
print(':: Merging raw profiles...', end='')
sys.stdout.flush()
raw_profiles = glob.glob(os.path.join(profile_data_dir, '*.profraw'))
manifest_path = os.path.join(profile_data_dir, 'profiles.manifest')
profdata_path = os.path.join(profile_data_dir, 'Coverage.profdata')
with open(manifest_path, 'w') as manifest:
manifest.write('\n'.join(raw_profiles))
subprocess.check_call([host_llvm_profdata, 'merge', '-sparse', '-f',
manifest_path, '-o', profdata_path])
if not preserve_profiles:
for raw_profile in raw_profiles:
os.remove(raw_profile)
os.remove(manifest_path)
print('Done!')
return profdata_path
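# Run 'llvm-cov show' to emit the html report and 'llvm-cov report' for a
# plain-text summary of the given binaries.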
def prepare_html_report(host_llvm_cov, profile, report_dir, binaries,
restricted_dirs):
print(':: Preparing html report for {0}...'.format(binaries), end='')
sys.stdout.flush()
objects = []
for i, binary in enumerate(binaries):
if i == 0:
objects.append(binary)
else:
objects.extend(('-object', binary))
invocation = [host_llvm_cov, 'show'] + objects + ['-format', 'html',
'-instr-profile', profile, '-o', report_dir,
'-show-line-counts-or-regions', '-Xdemangler', 'c++filt',
'-Xdemangler', '-n'] + restricted_dirs
subprocess.check_call(invocation)
with open(os.path.join(report_dir, 'summary.txt'), 'wb') as Summary:
subprocess.check_call([host_llvm_cov, 'report'] + objects +
['-instr-profile', profile] + restricted_dirs,
stdout=Summary)
print('Done!')
def prepare_html_reports(host_llvm_cov, profdata_path, report_dir, binaries,
unified_report, restricted_dirs):
if unified_report:
prepare_html_report(host_llvm_cov, profdata_path, report_dir, binaries,
restricted_dirs)
else:
for binary in binaries:
binary_report_dir = os.path.join(report_dir,
os.path.basename(binary))
prepare_html_report(host_llvm_cov, profdata_path, binary_report_dir,
[binary], restricted_dirs)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('host_llvm_profdata', help='Path to llvm-profdata')
parser.add_argument('host_llvm_cov', help='Path to llvm-cov')
parser.add_argument('profile_data_dir',
help='Path to the directory containing the raw profiles')
parser.add_argument('report_dir',
help='Path to the output directory for html reports')
parser.add_argument('binaries', metavar='B', type=str, nargs='*',
help='Path to an instrumented binary')
parser.add_argument('--only-merge', action='store_true',
help='Only merge raw profiles together, skip report '
'generation')
parser.add_argument('--preserve-profiles',
help='Do not delete raw profiles', action='store_true')
parser.add_argument('--use-existing-profdata',
help='Specify an existing indexed profile to use')
parser.add_argument('--unified-report', action='store_true',
help='Emit a unified report for all binaries')
parser.add_argument('--restrict', metavar='R', type=str, nargs='*',
default=[],
help='Restrict the reporting to the given source paths'
' (must be specified after all other positional arguments)')
args = parser.parse_args()
if args.use_existing_profdata and args.only_merge:
print('--use-existing-profdata and --only-merge are incompatible')
exit(1)
if args.use_existing_profdata:
profdata_path = args.use_existing_profdata
else:
profdata_path = merge_raw_profiles(args.host_llvm_profdata,
args.profile_data_dir,
args.preserve_profiles)
if not len(args.binaries):
print('No binaries specified, no work to do!')
exit(1)
if not args.only_merge:
prepare_html_reports(args.host_llvm_cov, profdata_path, args.report_dir,
args.binaries, args.unified_report, args.restrict)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/prepare-code-coverage-artifact.py |
#!/usr/bin/env python
"""A shuffle vector fuzz tester.
This is a python program to fuzz test the LLVM shufflevector instruction. It
generates a function with a random sequence of shufflevectors, maintaining the
element mapping accumulated across the function. It then generates a main
function which calls it with a different value in each element and checks that
the result matches the expected mapping.
Take the output IR printed to stdout, compile it to an executable using whatever
set of transforms you want to test, and run the program. If it crashes, it found
a bug.
"""
from __future__ import print_function
import argparse
import itertools
import random
import sys
import uuid
def main():
element_types=['i8', 'i16', 'i32', 'i64', 'f32', 'f64']
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show verbose output')
parser.add_argument('--seed', default=str(uuid.uuid4()),
help='A string used to seed the RNG')
parser.add_argument('--max-shuffle-height', type=int, default=16,
help='Specify a fixed height of shuffle tree to test')
parser.add_argument('--no-blends', dest='blends', action='store_false',
help='Include blends of two input vectors')
parser.add_argument('--fixed-bit-width', type=int, choices=[128, 256],
help='Specify a fixed bit width of vector to test')
parser.add_argument('--fixed-element-type', choices=element_types,
help='Specify a fixed element type to test')
parser.add_argument('--triple',
help='Specify a triple string to include in the IR')
args = parser.parse_args()
random.seed(args.seed)
if args.fixed_element_type is not None:
element_types=[args.fixed_element_type]
if args.fixed_bit_width is not None:
if args.fixed_bit_width == 128:
width_map={'i64': 2, 'i32': 4, 'i16': 8, 'i8': 16, 'f64': 2, 'f32': 4}
(width, element_type) = random.choice(
[(width_map[t], t) for t in element_types])
elif args.fixed_bit_width == 256:
width_map={'i64': 4, 'i32': 8, 'i16': 16, 'i8': 32, 'f64': 4, 'f32': 8}
(width, element_type) = random.choice(
[(width_map[t], t) for t in element_types])
else:
sys.exit(1) # Checked above by argument parsing.
else:
width = random.choice([2, 4, 8, 16, 32, 64])
element_type = random.choice(element_types)
element_modulus = {
'i8': 1 << 8, 'i16': 1 << 16, 'i32': 1 << 32, 'i64': 1 << 64,
'f32': 1 << 32, 'f64': 1 << 64}[element_type]
shuffle_range = (2 * width) if args.blends else width
# Because undef (-1) saturates and is indistinguishable when testing the
# correctness of a shuffle, we want to bias our fuzz toward having a decent
# mixture of non-undef lanes in the end. With a deep shuffle tree, the
# probabilities aren't good so we need to bias things. The math here is that if
# we uniformly select between -1 and the other inputs, each element of the
# result will have the following probability of being undef:
#
# 1 - (shuffle_range/(shuffle_range+1))^max_shuffle_height
#
# More generally, for any probability P of selecting a defined element in
# a single shuffle, the end result is:
#
# 1 - P^max_shuffle_height
#
# The power of the shuffle height is the real problem, as we want:
#
# 1 - shuffle_range/(shuffle_range+1)
#
# So we bias the selection of undef at any given node based on the tree
# height. Below, let 'A' be 'shuffle_range', 'C' be 'max_shuffle_height', and
# 'B' be the per-node bias '((A+1)*A^(1/C))/(A*(A+1)^(1/C))', chosen so that:
#
# 1 - ((B * A)/(A + 1))^C = 1 - A/(A + 1)
#
# So at each node we use:
#
# 1 - (B * A)/(A + 1)
# = 1 - ((A + 1) * A * A^(1/C))/(A * (A + 1) * (A + 1)^(1/C))
# = 1 - ((A + 1) * A^((C + 1)/C))/(A * (A + 1)^((C + 1)/C))
#
# This is the formula we use to select undef lanes in the shuffle.
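# For example, with shuffle_range = 8 and max_shuffle_height = 16, a uniform
# choice would leave about 1 - (8/9)^16, i.e. roughly 85%, of the result lanes
# undef, whereas the biased formula gives undef_prob of roughly 0.007 per node
# and only about 1 - 8/9, i.e. roughly 11%, of the final lanes undef.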
A = float(shuffle_range)
C = float(args.max_shuffle_height)
undef_prob = 1.0 - (((A + 1.0) * pow(A, (C + 1.0)/C)) /
(A * pow(A + 1.0, (C + 1.0)/C)))
shuffle_tree = [[[-1 if random.random() <= undef_prob
else random.choice(range(shuffle_range))
for _ in itertools.repeat(None, width)]
for _ in itertools.repeat(None, args.max_shuffle_height - i)]
for i in range(args.max_shuffle_height)]
if args.verbose:
# Print out the shuffle sequence in a compact form.
print(('Testing shuffle sequence "%s" (v%d%s):' %
(args.seed, width, element_type)), file=sys.stderr)
for i, shuffles in enumerate(shuffle_tree):
print(' tree level %d:' % (i,), file=sys.stderr)
for j, s in enumerate(shuffles):
print(' shuffle %d: %s' % (j, s), file=sys.stderr)
print('', file=sys.stderr)
# Symbolically evaluate the shuffle tree.
inputs = [[int(j % element_modulus)
for j in range(i * width + 1, (i + 1) * width + 1)]
for i in range(args.max_shuffle_height + 1)]
results = inputs
for shuffles in shuffle_tree:
results = [[((results[i] if j < width else results[i + 1])[j % width]
if j != -1 else -1)
for j in s]
for i, s in enumerate(shuffles)]
if len(results) != 1:
print('ERROR: Bad results: %s' % (results,), file=sys.stderr)
sys.exit(1)
result = results[0]
if args.verbose:
print('Which transforms:', file=sys.stderr)
print(' from: %s' % (inputs,), file=sys.stderr)
print(' into: %s' % (result,), file=sys.stderr)
print('', file=sys.stderr)
# The IR uses silly names for floating point types. We also need a same-size
# integer type.
integral_element_type = element_type
if element_type == 'f32':
integral_element_type = 'i32'
element_type = 'float'
elif element_type == 'f64':
integral_element_type = 'i64'
element_type = 'double'
# Now we need to generate IR for the shuffle function.
subst = {'N': width, 'T': element_type, 'IT': integral_element_type}
print("""
define internal fastcc <%(N)d x %(T)s> @test(%(arguments)s) noinline nounwind {
entry:""" % dict(subst,
arguments=', '.join(
['<%(N)d x %(T)s> %%s.0.%(i)d' % dict(subst, i=i)
for i in range(args.max_shuffle_height + 1)])))
for i, shuffles in enumerate(shuffle_tree):
for j, s in enumerate(shuffles):
print("""
%%s.%(next_i)d.%(j)d = shufflevector <%(N)d x %(T)s> %%s.%(i)d.%(j)d, <%(N)d x %(T)s> %%s.%(i)d.%(next_j)d, <%(N)d x i32> <%(S)s>
""".strip('\n') % dict(subst, i=i, next_i=i + 1, j=j, next_j=j + 1,
S=', '.join(['i32 ' + (str(si) if si != -1 else 'undef')
for si in s])))
print("""
ret <%(N)d x %(T)s> %%s.%(i)d.0
}
""" % dict(subst, i=len(shuffle_tree)))
# Generate some string constants that we can use to report errors.
for i, r in enumerate(result):
if r != -1:
s = ('FAIL(%(seed)s): lane %(lane)d, expected %(result)d, found %%d\n\\0A' %
{'seed': args.seed, 'lane': i, 'result': r})
s += ''.join(['\\00' for _ in itertools.repeat(None, 128 - len(s) + 2)])
print("""
@error.%(i)d = private unnamed_addr global [128 x i8] c"%(s)s"
""".strip() % {'i': i, 's': s})
# Define a wrapper function which is marked 'optnone' to prevent
# interprocedural optimizations from deleting the test.
print("""
define internal fastcc <%(N)d x %(T)s> @test_wrapper(%(arguments)s) optnone noinline {
%%result = call fastcc <%(N)d x %(T)s> @test(%(arguments)s)
ret <%(N)d x %(T)s> %%result
}
""" % dict(subst,
arguments=', '.join(['<%(N)d x %(T)s> %%s.%(i)d' % dict(subst, i=i)
for i in range(args.max_shuffle_height + 1)])))
# Finally, generate a main function which will trap if any lanes are mapped
# incorrectly (in an observable way).
print("""
define i32 @main() {
entry:
; Create a scratch space to print error messages.
%%str = alloca [128 x i8]
%%str.ptr = getelementptr inbounds [128 x i8], [128 x i8]* %%str, i32 0, i32 0
; Build the input vector and call the test function.
%%v = call fastcc <%(N)d x %(T)s> @test_wrapper(%(inputs)s)
; We need to cast this back to an integer type vector to easily check the
; result.
%%v.cast = bitcast <%(N)d x %(T)s> %%v to <%(N)d x %(IT)s>
br label %%test.0
""" % dict(subst,
inputs=', '.join(
[('<%(N)d x %(T)s> bitcast '
'(<%(N)d x %(IT)s> <%(input)s> to <%(N)d x %(T)s>)' %
dict(subst, input=', '.join(['%(IT)s %(i)d' % dict(subst, i=i)
for i in input])))
for input in inputs])))
# Test that each non-undef result lane contains the expected value.
for i, r in enumerate(result):
if r == -1:
print("""
test.%(i)d:
; Skip this lane, its value is undef.
br label %%test.%(next_i)d
""" % dict(subst, i=i, next_i=i + 1))
else:
print("""
test.%(i)d:
%%v.%(i)d = extractelement <%(N)d x %(IT)s> %%v.cast, i32 %(i)d
%%cmp.%(i)d = icmp ne %(IT)s %%v.%(i)d, %(r)d
br i1 %%cmp.%(i)d, label %%die.%(i)d, label %%test.%(next_i)d
die.%(i)d:
; Capture the actual value and print an error message.
%%tmp.%(i)d = zext %(IT)s %%v.%(i)d to i2048
%%bad.%(i)d = trunc i2048 %%tmp.%(i)d to i32
call i32 (i8*, i8*, ...) @sprintf(i8* %%str.ptr, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @error.%(i)d, i32 0, i32 0), i32 %%bad.%(i)d)
%%length.%(i)d = call i32 @strlen(i8* %%str.ptr)
call i32 @write(i32 2, i8* %%str.ptr, i32 %%length.%(i)d)
call void @llvm.trap()
unreachable
""" % dict(subst, i=i, next_i=i + 1, r=r))
print("""
test.%d:
ret i32 0
}
declare i32 @strlen(i8*)
declare i32 @write(i32, i8*, i32)
declare i32 @sprintf(i8*, i8*, ...)
declare void @llvm.trap() noreturn nounwind
""" % (len(result),))
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/shuffle_fuzz.py |
#!/usr/bin/env python3
"""A test case update script.
This script is a utility to update LLVM 'llvm-mca' based test cases with new
FileCheck patterns.
"""
import argparse
from collections import defaultdict
import glob
import os
import sys
import warnings
from UpdateTestChecks import common
COMMENT_CHAR = '#'
ADVERT_PREFIX = '{} NOTE: Assertions have been autogenerated by '.format(
COMMENT_CHAR)
ADVERT = '{}utils/{}'.format(ADVERT_PREFIX, os.path.basename(__file__))
class Error(Exception):
""" Generic Error that can be raised without printing a traceback.
"""
pass
def _warn(msg):
""" Log a user warning to stderr.
"""
warnings.warn(msg, Warning, stacklevel=2)
def _configure_warnings(args):
warnings.resetwarnings()
if args.w:
warnings.simplefilter('ignore')
if args.Werror:
warnings.simplefilter('error')
def _showwarning(message, category, filename, lineno, file=None, line=None):
""" Version of warnings.showwarning that won't attempt to print out the
line at the location of the warning if the line text is not explicitly
specified.
"""
if file is None:
file = sys.stderr
if line is None:
line = ''
file.write(warnings.formatwarning(message, category, filename, lineno, line))
def _parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-w',
action='store_true',
help='suppress warnings')
parser.add_argument('-Werror',
action='store_true',
help='promote warnings to errors')
parser.add_argument('--llvm-mca-binary',
metavar='<path>',
default='llvm-mca',
help='the binary to use to generate the test case '
'(default: llvm-mca)')
parser.add_argument('tests',
metavar='<test-path>',
nargs='+')
args = common.parse_commandline_args(parser)
_configure_warnings(args)
if not args.llvm_mca_binary:
raise Error('--llvm-mca-binary value cannot be empty string')
if 'llvm-mca' not in os.path.basename(args.llvm_mca_binary):
_warn('unexpected binary name: {}'.format(args.llvm_mca_binary))
return args
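# Parse each RUN line into a (check-prefixes, tool-arguments) pair, skipping
# lines that do not pipe the tool under test into FileCheck.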
def _get_run_infos(run_lines, args):
run_infos = []
for run_line in run_lines:
try:
(tool_cmd, filecheck_cmd) = tuple([cmd.strip()
for cmd in run_line.split('|', 1)])
except ValueError:
_warn('could not split tool and filecheck commands: {}'.format(run_line))
continue
common.verify_filecheck_prefixes(filecheck_cmd)
tool_basename = os.path.splitext(os.path.basename(args.llvm_mca_binary))[0]
if not tool_cmd.startswith(tool_basename + ' '):
_warn('skipping non-{} RUN line: {}'.format(tool_basename, run_line))
continue
if not filecheck_cmd.startswith('FileCheck '):
_warn('skipping non-FileCheck RUN line: {}'.format(run_line))
continue
tool_cmd_args = tool_cmd[len(tool_basename):].strip()
tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [item
for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
run_infos.append((check_prefixes, tool_cmd_args))
return run_infos
def _break_down_block(block_info, common_prefix):
""" Given a block_info, see if we can analyze it further to let us break it
down by prefix per-line rather than per-block.
"""
texts = block_info.keys()
prefixes = list(block_info.values())
# Split the lines from each of the incoming block_texts and zip them so that
# each element contains the corresponding lines from each text. E.g.
#
# block_text_1: A # line 1
# B # line 2
#
# block_text_2: A # line 1
# C # line 2
#
# would become:
#
# [(A, A), # line 1
# (B, C)] # line 2
#
line_tuples = list(zip(*list((text.splitlines() for text in texts))))
# To simplify output, we'll only proceed if the very first line of the block
# texts is common to each of them.
if len(set(line_tuples[0])) != 1:
return []
result = []
lresult = defaultdict(list)
for i, line in enumerate(line_tuples):
if len(set(line)) == 1:
# We're about to output a line with the common prefix. This is a sync
# point so flush any batched-up lines one prefix at a time to the output
# first.
for prefix in sorted(lresult):
result.extend(lresult[prefix])
lresult = defaultdict(list)
# The line is common to each block so output with the common prefix.
result.append((common_prefix, line[0]))
else:
# The line is not common to each block, or we don't have a common prefix.
# If there are no prefixes available, warn and bail out.
if not prefixes[0]:
_warn('multiple lines not disambiguated by prefixes:\n{}\n'
'Some blocks may be skipped entirely as a result.'.format(
'\n'.join(' - {}'.format(l) for l in line)))
return []
# Iterate through the line from each of the blocks and add the line with
# the corresponding prefix to the current batch of results so that we can
# later output them per-prefix.
for i, l in enumerate(line):
for prefix in prefixes[i]:
lresult[prefix].append((prefix, l))
# Flush any remaining batched-up lines one prefix at a time to the output.
for prefix in sorted(lresult):
result.extend(lresult[prefix])
return result
def _get_useful_prefix_info(run_infos):
""" Given the run_infos, calculate any prefixes that are common to every one,
and the length of the longest prefix string.
"""
try:
all_sets = [set(s) for s in list(zip(*run_infos))[0]]
common_to_all = set.intersection(*all_sets)
longest_prefix_len = max(len(p) for p in set.union(*all_sets))
except IndexError:
common_to_all = []
longest_prefix_len = 0
else:
if len(common_to_all) > 1:
_warn('Multiple prefixes common to all RUN lines: {}'.format(
common_to_all))
if common_to_all:
common_to_all = sorted(common_to_all)[0]
return common_to_all, longest_prefix_len
def _align_matching_blocks(all_blocks, farthest_indexes):
""" Some sub-sequences of blocks may be common to multiple lists of blocks,
but at different indexes in each one.
For example, in the following case, A,B,E,F, and H are common to both
sets, but only A and B would be identified as such due to the indexes
matching:
index | 0 1 2 3 4 5 6
------+--------------
setA | A B C D E F H
setB | A B E F G H
This function attempts to align the indexes of matching blocks by
inserting empty blocks into the block list. With this approach, A, B, E,
F, and H would now be able to be identified as matching blocks:
index | 0 1 2 3 4 5 6 7
------+----------------
setA | A B C D E F H
setB | A B E F G H
"""
# "Farthest block analysis": essentially, iterate over all blocks and find
# the highest index into a block list for the first instance of each block.
# This is relatively expensive, but we're dealing with small numbers of
# blocks so it doesn't make a perceivable difference to user time.
for blocks in all_blocks.values():
for block in blocks:
if not block:
continue
index = blocks.index(block)
if index > farthest_indexes[block]:
farthest_indexes[block] = index
# Use the results of the above analysis to identify any blocks that can be
# shunted along to match the farthest index value.
for blocks in all_blocks.values():
for index, block in enumerate(blocks):
if not block:
continue
changed = False
# If the block has not already been subject to alignment (i.e. if the
# previous block is not empty) then insert empty blocks until the index
# matches the farthest index identified for that block.
if (index > 0) and blocks[index - 1]:
while index < farthest_indexes[block]:
blocks.insert(index, '')
index += 1
changed = True
if changed:
# Bail out. We'll need to re-do the farthest block analysis now that
# we've inserted some blocks.
return True
return False
def _get_block_infos(run_infos, test_path, args, common_prefix): # noqa
""" For each run line, run the tool with the specified args and collect the
output. We use the concept of 'blocks' for uniquing, where a block is
a series of lines of text with no more than one newline character between
each one. For example:
This
is
one
block
This is
another block
This is yet another block
We then build up a 'block_infos' structure containing a dict where the
text of each block is the key and a list of the sets of prefixes that may
generate that particular block. This then goes through a series of
transformations to minimise the amount of CHECK lines that need to be
written by taking advantage of common prefixes.
"""
def _block_key(tool_args, prefixes):
""" Get a hashable key based on the current tool_args and prefixes.
"""
return ' '.join([tool_args] + prefixes)
all_blocks = {}
max_block_len = 0
# A cache of the furthest-back position in any block list of the first
# instance of each block, indexed by the block itself.
farthest_indexes = defaultdict(int)
# Run the tool for each run line to generate all of the blocks.
for prefixes, tool_args in run_infos:
key = _block_key(tool_args, prefixes)
raw_tool_output = common.invoke_tool(args.llvm_mca_binary,
tool_args,
test_path)
# Replace any lines consisting of purely whitespace with empty lines.
raw_tool_output = '\n'.join(line if line.strip() else ''
for line in raw_tool_output.splitlines())
# Split blocks, stripping all trailing whitespace, but keeping preceding
# whitespace except for newlines so that columns will line up visually.
all_blocks[key] = [b.lstrip('\n').rstrip()
for b in raw_tool_output.split('\n\n')]
max_block_len = max(max_block_len, len(all_blocks[key]))
# Attempt to align matching blocks until no more changes can be made.
made_changes = True
while made_changes:
made_changes = _align_matching_blocks(all_blocks, farthest_indexes)
# If necessary, pad the lists of blocks with empty blocks so that they are
# all the same length.
for key in all_blocks:
len_to_pad = max_block_len - len(all_blocks[key])
all_blocks[key] += [''] * len_to_pad
# Create the block_infos structure where it is a nested dict in the form of:
# block number -> block text -> list of prefix sets
block_infos = defaultdict(lambda: defaultdict(list))
for prefixes, tool_args in run_infos:
key = _block_key(tool_args, prefixes)
for block_num, block_text in enumerate(all_blocks[key]):
block_infos[block_num][block_text].append(set(prefixes))
# Now go through the block_infos structure and attempt to smartly prune the
# number of prefixes per block to the minimal set possible to output.
for block_num in range(len(block_infos)):
# When there are multiple block texts for a block num, remove any
# prefixes that are common to more than one of them.
# E.g. [ [{ALL,FOO}] , [{ALL,BAR}] ] -> [ [{FOO}] , [{BAR}] ]
all_sets = [s for s in block_infos[block_num].values()]
pruned_sets = []
for i, setlist in enumerate(all_sets):
other_set_values = set([elem for j, setlist2 in enumerate(all_sets)
for set_ in setlist2 for elem in set_
if i != j])
pruned_sets.append([s - other_set_values for s in setlist])
for i, block_text in enumerate(block_infos[block_num]):
# When a block text matches multiple sets of prefixes, try removing any
# prefixes that aren't common to all of them.
# E.g. [ {ALL,FOO} , {ALL,BAR} ] -> [{ALL}]
common_values = set.intersection(*pruned_sets[i])
if common_values:
pruned_sets[i] = [common_values]
# Everything should be uniqued as much as possible by now. Apply the
# newly pruned sets to the block_infos structure.
# If there are any blocks of text that still match multiple prefixes,
# output a warning.
current_set = set()
for s in pruned_sets[i]:
s = sorted(list(s))
if s:
current_set.add(s[0])
if len(s) > 1:
_warn('Multiple prefixes generating same output: {} '
'(discarding {})'.format(','.join(s), ','.join(s[1:])))
if block_text and not current_set:
raise Error(
'block not captured by existing prefixes:\n\n{}'.format(block_text))
block_infos[block_num][block_text] = sorted(list(current_set))
# If we have multiple block_texts, try to break them down further to avoid
# the case where we have very similar block_texts repeated after each
# other.
if common_prefix and len(block_infos[block_num]) > 1:
# We'll only attempt this if each of the block_texts have the same number
# of lines as each other.
same_num_lines = (len(set(len(k.splitlines())
for k in block_infos[block_num].keys())) == 1)
if same_num_lines:
breakdown = _break_down_block(block_infos[block_num], common_prefix)
if breakdown:
block_infos[block_num] = breakdown
return block_infos
def _write_block(output, block, not_prefix_set, common_prefix, prefix_pad):
""" Write an individual block, with correct padding on the prefixes.
Returns a set of all of the prefixes that it has written.
"""
end_prefix = ': '
previous_prefix = None
num_lines_of_prefix = 0
written_prefixes = set()
for prefix, line in block:
if prefix in not_prefix_set:
_warn('not writing for prefix {0} due to presence of "{0}-NOT:" '
'in input file.'.format(prefix))
continue
# If the previous line isn't already blank and we're writing more than one
# line for the current prefix, output a blank line first, unless either the
# current or previous prefix is common to all.
num_lines_of_prefix += 1
if prefix != previous_prefix:
if output and output[-1]:
if num_lines_of_prefix > 1 or any(p == common_prefix
for p in (prefix, previous_prefix)):
output.append('')
num_lines_of_prefix = 0
previous_prefix = prefix
written_prefixes.add(prefix)
output.append(
'{} {}{}{} {}'.format(COMMENT_CHAR,
prefix,
end_prefix,
' ' * (prefix_pad - len(prefix)),
line).rstrip())
end_prefix = '-NEXT:'
output.append('')
return written_prefixes
def _write_output(test_path, input_lines, prefix_list, block_infos, # noqa
args, common_prefix, prefix_pad):
prefix_set = set([prefix for prefixes, _ in prefix_list
for prefix in prefixes])
not_prefix_set = set()
output_lines = []
for input_line in input_lines:
if input_line.startswith(ADVERT_PREFIX):
continue
if input_line.startswith(COMMENT_CHAR):
m = common.CHECK_RE.match(input_line)
try:
prefix = m.group(1)
except AttributeError:
prefix = None
if '{}-NOT:'.format(prefix) in input_line:
not_prefix_set.add(prefix)
if prefix not in prefix_set or prefix in not_prefix_set:
output_lines.append(input_line)
continue
if common.should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r' ', input_line)
# Skip empty lines if the previous output line is also empty.
if input_line or output_lines[-1]:
output_lines.append(input_line)
else:
continue
# Add a blank line before the new checks if required.
if len(output_lines) > 0 and output_lines[-1]:
output_lines.append('')
output_check_lines = []
used_prefixes = set()
for block_num in range(len(block_infos)):
if isinstance(block_infos[block_num], list):
# The block is of the type output from _break_down_block().
used_prefixes |= _write_block(output_check_lines,
block_infos[block_num],
not_prefix_set,
common_prefix,
prefix_pad)
else:
# _break_down_block() was unable to do anything, so output the block
# as-is.
# Rather than writing out each block as soon we encounter it, save it
# indexed by prefix so that we can write all of the blocks out sorted by
# prefix at the end.
output_blocks = defaultdict(list)
for block_text in sorted(block_infos[block_num]):
if not block_text:
continue
lines = block_text.split('\n')
for prefix in block_infos[block_num][block_text]:
assert prefix not in output_blocks
used_prefixes |= _write_block(output_blocks[prefix],
[(prefix, line) for line in lines],
not_prefix_set,
common_prefix,
prefix_pad)
for prefix in sorted(output_blocks):
output_check_lines.extend(output_blocks[prefix])
unused_prefixes = (prefix_set - not_prefix_set) - used_prefixes
if unused_prefixes:
raise Error('unused prefixes: {}'.format(sorted(unused_prefixes)))
if output_check_lines:
output_lines.insert(0, ADVERT)
output_lines.extend(output_check_lines)
# The file should not end with two newlines. It creates unnecessary churn.
while len(output_lines) > 0 and output_lines[-1] == '':
output_lines.pop()
if input_lines == output_lines:
sys.stderr.write(' [unchanged]\n')
return
sys.stderr.write(' [{} lines total]\n'.format(len(output_lines)))
common.debug('Writing', len(output_lines), 'lines to', test_path, '..\n\n')
with open(test_path, 'wb') as f:
f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main():
args = _parse_args()
test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
for test_path in test_paths:
sys.stderr.write('Test: {}\n'.format(test_path))
# Call this per test. By default each warning will only be written once
# per source location. Reset the warning filter so that now each warning
# will be written once per source location per test.
_configure_warnings(args)
if not os.path.isfile(test_path):
raise Error('could not find test file: {}'.format(test_path))
with open(test_path) as f:
input_lines = [l.rstrip() for l in f]
run_lines = common.find_run_lines(test_path, input_lines)
run_infos = _get_run_infos(run_lines, args)
common_prefix, prefix_pad = _get_useful_prefix_info(run_infos)
block_infos = _get_block_infos(run_infos, test_path, args, common_prefix)
_write_output(test_path,
input_lines,
run_infos,
block_infos,
args,
common_prefix,
prefix_pad)
return 0
if __name__ == '__main__':
try:
warnings.showwarning = _showwarning
sys.exit(main())
except Error as e:
sys.stdout.write('error: {}\n'.format(e))
sys.exit(1)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/update_mca_test_checks.py |
#!/usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
'''
Provides an implementation that performs no logging
'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
urls = (
'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
'repository.txt',
'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
'''
Downloads the mingw-builds repository files and parses them
'''
log.info('getting mingw-builds repository')
versions = {}
re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
re_sub = r'http://downloads.sourceforge.net/project/\1'
for url in urls:
log.debug(' - requesting: %s', url)
socket = request.urlopen(url)
repo = socket.read()
if not isinstance(repo, str):
repo = repo.decode()
socket.close()
for entry in repo.split('\n')[:-1]:
value = entry.split('|')
version = tuple([int(n) for n in value[0].strip().split('.')])
version = versions.setdefault(version, {})
arch = value[1].strip()
if arch == 'x32':
arch = 'i686'
elif arch == 'x64':
arch = 'x86_64'
arch = version.setdefault(arch, {})
threading = arch.setdefault(value[2].strip(), {})
exceptions = threading.setdefault(value[3].strip(), {})
revision = exceptions.setdefault(int(value[4].strip()[3:]),
re_sourceforge.sub(re_sub, value[5].strip()))
return versions
def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if isinstance(path, str):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
'''
Attempts to find 7zip for unpacking the mingw-build archives
'''
log.info('finding 7zip')
path = find_in_path('7z')
if not path:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
path, _ = winreg.QueryValueEx(key, 'Path')
path = [os.path.join(path, '7z.exe')]
log.debug('found \'%s\'', path[0])
return path[0]
def unpack(archive, location, log = EmptyLogger()):
'''
Unpacks a mingw-builds archive
'''
sevenzip = find_7zip(log)
log.info('unpacking %s', os.path.basename(archive))
cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
log.debug(' - %r', cmd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
'''
Downloads and unpacks a mingw-builds archive
'''
log.info('downloading MinGW')
log.debug(' - url: %s', url)
log.debug(' - location: %s', location)
re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
stream = request.urlopen(url)
try:
content = stream.getheader('Content-Disposition') or ''
except AttributeError:
content = stream.headers.getheader('Content-Disposition') or ''
matches = re_content.match(content)
if matches:
filename = matches.group(2)
else:
parsed = parse.urlparse(stream.geturl())
filename = os.path.basename(parsed.path)
try:
os.makedirs(location)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(location):
pass
else:
raise
archive = os.path.join(location, filename)
with open(archive, 'wb') as out:
while True:
buf = stream.read(1024)
if not buf:
break
out.write(buf)
unpack(archive, location, log = log)
os.remove(archive)
possible = os.path.join(location, 'mingw64')
if not os.path.exists(possible):
possible = os.path.join(location, 'mingw32')
if not os.path.exists(possible):
raise ValueError('Failed to find unpacked MinGW: ' + possible)
return possible
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
keys = list(versions[version][arch].keys())
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
keys = list(versions[version][arch][threading].keys())
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
if revision is None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
# Tell the user whatzzup
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
def str2ver(string):
'''
Converts a version string into a tuple
'''
try:
version = tuple(int(v) for v in string.split('.'))
if len(version) != 3:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
'please provide a three digit version string')
return version
def main():
'''
Invoked when the script is run directly by the python interpreter
'''
parser = argparse.ArgumentParser(
description = 'Downloads a specific version of MinGW',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--location',
help = 'the location to download the compiler to',
default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
help = 'the target MinGW architecture string')
parser.add_argument('--version', type = str2ver,
help = 'the version of GCC to download')
parser.add_argument('--threading', choices = ['posix', 'win32'],
help = 'the threading type of the compiler')
parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
help = 'the method to throw exceptions')
parser.add_argument('--revision', type=int,
help = 'the revision of the MinGW release')
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='store_true',
help='increase the script output verbosity')
group.add_argument('-q', '--quiet', action='store_true',
help='only print errors and warning')
args = parser.parse_args()
# Create the logger
logger = logging.getLogger('mingw')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if args.quiet:
logger.setLevel(logging.WARN)
if args.verbose:
logger.setLevel(logging.DEBUG)
# Get MinGW
root_dir = root(location = args.location, arch = args.arch,
version = args.version, threading = args.threading,
exceptions = args.exceptions, revision = args.revision,
log = logger)
sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
if __name__ == '__main__':
try:
main()
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
sys.exit(1)
except OSError as e:
sys.stderr.write('OS error: %s\n' % e)
sys.exit(1)
except KeyboardInterrupt as e:
sys.stderr.write('Killed\n')
sys.exit(1)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/benchmark/mingw.py |
#!/usr/bin/env python
"""
strip_asm.py - Cleanup ASM output for the specified file
"""
from argparse import ArgumentParser
import sys
import os
import re
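# Return the set of '.L<name>' labels that are actually targeted by a jump
# instruction.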
def find_used_labels(asm):
found = set()
label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
for l in asm.splitlines():
m = label_re.match(l)
if m:
found.add('.L%s' % m.group(1))
return found
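# If label declarations are spelled without the leading '.', rewrite the
# assembly so that every label carries it.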
def normalize_labels(asm):
decls = set()
label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if m:
decls.add(m.group(0))
if len(decls) == 0:
return asm
needs_dot = next(iter(decls))[0] != '.'
if not needs_dot:
return asm
for ld in decls:
asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm)
return asm
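# Normalize label spelling, then drop any label declaration that is never
# jumped to.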
def transform_labels(asm):
asm = normalize_labels(asm)
used_decls = find_used_labels(asm)
new_asm = ''
label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if not m or m.group(0) in used_decls:
new_asm += l
new_asm += '\n'
return new_asm
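# Return True if 'tk' looks like an identifier: a letter or underscore
# followed by alphanumerics and underscores.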
def is_identifier(tk):
if len(tk) == 0:
return False
first = tk[0]
if not first.isalpha() and first != '_':
return False
for i in range(1, len(tk)):
c = tk[i]
if not c.isalnum() and c != '_':
return False
return True
def process_identifiers(l):
"""
process_identifiers - process all identifiers and modify them to have
consistent names across all platforms; specifically across ELF and MachO.
For example, MachO inserts an additional underscore at the beginning of
names. This function removes that.
"""
parts = re.split(r'([a-zA-Z0-9_]+)', l)
new_line = ''
for tk in parts:
if is_identifier(tk):
if tk.startswith('__Z'):
tk = tk[1:]
elif tk.startswith('_') and len(tk) > 1 and \
tk[1].isalpha() and tk[1] != 'Z':
tk = tk[1:]
new_line += tk
return new_line
def process_asm(asm):
"""
Strip the ASM of unwanted directives and lines
"""
new_contents = ''
asm = transform_labels(asm)
# TODO: Add more things we want to remove
discard_regexes = [
re.compile("\s+\..*$"), # directive
re.compile("\s*#(NO_APP|APP)$"), #inline ASM
re.compile("\s*#.*$"), # comment line
re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive
re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
]
keep_regexes = [
]
fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
for l in asm.splitlines():
# Remove Mach-O attribute
l = l.replace('@GOTPCREL', '')
add_line = True
for reg in discard_regexes:
if reg.match(l) is not None:
add_line = False
break
for reg in keep_regexes:
if reg.match(l) is not None:
add_line = True
break
if add_line:
if fn_label_def.match(l) and len(new_contents) != 0:
new_contents += '\n'
l = process_identifiers(l)
new_contents += l
new_contents += '\n'
return new_contents
def main():
parser = ArgumentParser(
description='generate a stripped assembly file')
parser.add_argument(
'input', metavar='input', type=str, nargs=1,
help='An input assembly file')
parser.add_argument(
'out', metavar='output', type=str, nargs=1,
help='The output file')
args, unknown_args = parser.parse_known_args()
input = args.input[0]
output = args.out[0]
if not os.path.isfile(input):
print(("ERROR: input file '%s' does not exist") % input)
sys.exit(1)
contents = None
with open(input, 'r') as f:
contents = f.read()
new_contents = process_asm(contents)
with open(output, 'w') as f:
f.write(new_contents)
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/benchmark/tools/strip_asm.py |
#!/usr/bin/env python
"""
compare.py - versatile benchmark output compare tool
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
"""
Perform checking on the user provided inputs and diagnose any abnormalities
"""
in1_kind, in1_err = classify_input_file(in1)
in2_kind, in2_err = classify_input_file(in2)
output_file = find_benchmark_flag('--benchmark_out=', flags)
output_type = find_benchmark_flag('--benchmark_out_format=', flags)
if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
print(("WARNING: '--benchmark_out=%s' will be passed to both "
"benchmarks causing it to be overwritten") % output_file)
if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
print("WARNING: passing optional flags has no effect since both "
"inputs are JSON")
if output_type is not None and output_type != 'json':
print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
" is not supported.") % output_type)
sys.exit(1)
def create_parser():
parser = ArgumentParser(
description='versatile benchmark output compare tool')
subparsers = parser.add_subparsers(
help='This tool has multiple modes of operation:',
dest='mode')
parser_a = subparsers.add_parser(
'benchmarks',
help='The simplest use case: compare the entire output of two benchmarks')
baseline = parser_a.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
contender = parser_a.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
parser_a.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_b = subparsers.add_parser(
'filters', help='Compare two different filters of the same benchmark')
baseline = parser_b.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test',
metavar='test',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_b.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_b.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_c = subparsers.add_parser(
'benchmarksfiltered',
help='Compare one filter of the first benchmark with another filter of the second benchmark')
baseline = parser_c.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_c.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='The second benchmark executable or JSON output file, that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_c.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
return parser
def main():
# Parse the command line flags
parser = create_parser()
args, unknown_args = parser.parse_known_args()
if args.mode is None:
parser.print_help()
exit(1)
assert not unknown_args
benchmark_options = args.benchmark_options
if args.mode == 'benchmarks':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = ''
filter_contender = ''
# NOTE: if test_baseline == test_contender, you are analyzing the stdev
description = 'Comparing %s to %s' % (test_baseline, test_contender)
elif args.mode == 'filters':
test_baseline = args.test[0].name
test_contender = args.test[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if filter_baseline == filter_contender, you are analyzing the
# stdev
description = 'Comparing %s to %s (from %s)' % (
filter_baseline, filter_contender, args.test[0].name)
elif args.mode == 'benchmarksfiltered':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if test_baseline == test_contender and
# filter_baseline == filter_contender, you are analyzing the stdev
description = 'Comparing %s (from %s) to %s (from %s)' % (
filter_baseline, test_baseline, filter_contender, test_contender)
else:
# should never happen
print("Unrecognized mode of operation: '%s'" % args.mode)
parser.print_help()
exit(1)
check_inputs(test_baseline, test_contender, benchmark_options)
options_baseline = []
options_contender = []
if filter_baseline and filter_contender:
options_baseline = ['--benchmark_filter=%s' % filter_baseline]
options_contender = ['--benchmark_filter=%s' % filter_contender]
# Run the benchmarks and report the results
json1 = json1_orig = gbench.util.run_or_load_benchmark(
test_baseline, benchmark_options + options_baseline)
json2 = json2_orig = gbench.util.run_or_load_benchmark(
test_contender, benchmark_options + options_contender)
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
json1 = gbench.report.filter_benchmark(
json1_orig, filter_baseline, replacement)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
# Diff and output
output_lines = gbench.report.generate_difference_report(json1, json2)
print(description)
for ln in output_lines:
print(ln)
import unittest
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'gbench',
'Inputs')
self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
def test_benchmarks_basic(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1])
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, 'd'])
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['d'])
def test_benchmarks_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_basic(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd'])
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertFalse(parsed.benchmark_options)
def test_filters_with_remainder(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', 'e'])
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', '--', 'f'])
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['f'])
def test_benchmarksfiltered_basic(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertFalse(parsed.benchmark_options)
def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'f')
def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
# unittest.main()
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/benchmark/tools/compare.py |
"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import tempfile
import subprocess
import sys
# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
def is_executable_file(filename):
"""
Return 'True' if 'filename' names a valid file which is likely
an executable. A file is considered an executable if it starts with the
magic bytes for an EXE, Mach-O, or ELF file.
"""
if not os.path.isfile(filename):
return False
with open(filename, mode='rb') as f:
magic_bytes = f.read(_num_magic_bytes)
if sys.platform == 'darwin':
return magic_bytes in [
b'\xfe\xed\xfa\xce', # MH_MAGIC
b'\xce\xfa\xed\xfe', # MH_CIGAM
b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
b'\xca\xfe\xba\xbe', # FAT_MAGIC
b'\xbe\xba\xfe\xca' # FAT_CIGAM
]
elif sys.platform.startswith('win'):
return magic_bytes == b'MZ'
else:
return magic_bytes == b'\x7FELF'
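# A quick sketch of what the magic-byte check above sees (inputs
# illustrative): on Linux an ELF binary starts with b'\x7fELF', so a
# compiled benchmark yields True, while a JSON results file (starting
# with b'{') falls through to False.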
def is_json_file(filename):
"""
Returns 'True' if 'filename' names a valid JSON output file.
'False' otherwise.
"""
try:
with open(filename, 'r') as f:
json.load(f)
return True
except (IOError, OSError, ValueError):
# Unreadable, or not parseable as JSON.
pass
return False
def classify_input_file(filename):
"""
Return a tuple (type, msg) where 'type' specifies the classified type
of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
string representing the error.
"""
ftype = IT_Invalid
err_msg = None
if not os.path.exists(filename):
err_msg = "'%s' does not exist" % filename
elif not os.path.isfile(filename):
err_msg = "'%s' does not name a file" % filename
elif is_executable_file(filename):
ftype = IT_Executable
elif is_json_file(filename):
ftype = IT_JSON
else:
err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
return ftype, err_msg
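# Sketch of the classification contract (file names hypothetical):
#   classify_input_file('./bench')     -> (IT_Executable, None)
#   classify_input_file('run.json')    -> (IT_JSON, None)
#   classify_input_file('missing.txt') -> (IT_Invalid, "'missing.txt' does not exist")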
def check_input_file(filename):
"""
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
"""
ftype, msg = classify_input_file(filename)
if ftype == IT_Invalid:
print("Invalid input file: %s" % msg)
sys.exit(1)
return ftype
def find_benchmark_flag(prefix, benchmark_flags):
"""
Search the specified list of flags for a flag matching `<prefix><arg>` and,
if it is found, return the arg it specifies. If the flag occurs more than
once, the last value is returned. If the flag is not found, None is returned.
"""
assert prefix.startswith('--') and prefix.endswith('=')
result = None
for f in benchmark_flags:
if f.startswith(prefix):
result = f[len(prefix):]
return result
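# Sketch of the last-one-wins lookup above (flag values illustrative):
#   flags = ['--benchmark_out=a.json', '--benchmark_out=b.json']
#   find_benchmark_flag('--benchmark_out=', flags)     # -> 'b.json'
#   find_benchmark_flag('--benchmark_filter=', flags)  # -> None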
def remove_benchmark_flags(prefix, benchmark_flags):
"""
Return a new list containing the specified benchmark_flags except those
with the specified prefix.
"""
assert prefix.startswith('--') and prefix.endswith('=')
return [f for f in benchmark_flags if not f.startswith(prefix)]
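# Companion sketch (values illustrative):
#   remove_benchmark_flags('--benchmark_out=',
#       ['--benchmark_out=a.json', '--v=2'])  # -> ['--v=2']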
def load_benchmark_results(fname):
"""
Read benchmark output from a file and return the JSON object.
REQUIRES: 'fname' names a file containing JSON benchmark output.
"""
with open(fname, 'r') as f:
return json.load(f)
def run_benchmark(exe_name, benchmark_flags):
"""
Run a benchmark specified by 'exe_name' with the specified
'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
real time console output.
RETURNS: A JSON object representing the benchmark output
"""
output_name = find_benchmark_flag('--benchmark_out=',
benchmark_flags)
is_temp_output = False
if output_name is None:
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + \
['--benchmark_out=%s' % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:
print('TEST FAILED...')
sys.exit(exitCode)
json_res = load_benchmark_results(output_name)
if is_temp_output:
os.unlink(output_name)
return json_res
def run_or_load_benchmark(filename, benchmark_flags):
"""
Get the results for a specified benchmark. If 'filename' specifies
an executable benchmark then the results are generated by running the
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
elif ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
else:
assert False # This branch is unreachable | MDL-SDK-master | src/mdl/jit/llvm/dist/utils/benchmark/tools/gbench/util.py |
"""Google Benchmark tooling"""
__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/benchmark/tools/gbench/__init__.py |
"""report.py - Utilities for reporting statistics about benchmark results
"""
import os
import re
import copy
class BenchmarkColor(object):
def __init__(self, name, code):
self.name = name
self.code = code
def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.code))
def __format__(self, format):
return self.code
# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
def color_format(use_color, fmt_str, *args, **kwargs):
"""
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
is False then all color codes in 'args' and 'kwargs' are replaced with
the empty string.
"""
assert use_color is True or use_color is False
if not use_color:
args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for arg in args]
kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()}
return fmt_str.format(*args, **kwargs)
def find_longest_name(benchmark_list):
"""
Return the length of the longest benchmark name in a given list of
benchmark JSON objects
"""
longest_name = 1
for bc in benchmark_list:
if len(bc['name']) > longest_name:
longest_name = len(bc['name'])
return longest_name
def calculate_change(old_val, new_val):
"""
Return a float representing the decimal change between old_val and new_val.
"""
if old_val == 0 and new_val == 0:
return 0.0
if old_val == 0:
return float(new_val - old_val) / (float(old_val + new_val) / 2)
return float(new_val - old_val) / abs(old_val)
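# Worked examples for calculate_change (values illustrative):
#   calculate_change(100, 110) -> +0.10  (10% slower)
#   calculate_change(100, 90)  -> -0.10  (10% faster)
#   calculate_change(0, 10)    -> +2.00  (measured against the midpoint, 5,
#                                         because the old value is zero)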
def filter_benchmark(json_orig, family, replacement=""):
"""
Apply a filter to the json, and only leave the 'family' of benchmarks.
"""
regex = re.compile(family)
filtered = {}
filtered['benchmarks'] = []
for be in json_orig['benchmarks']:
if not regex.search(be['name']):
continue
filteredbench = copy.deepcopy(be) # Do NOT modify the old name!
filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
filtered['benchmarks'].append(filteredbench)
return filtered
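# Sketch: given benchmarks named 'BM_Zero/4' and 'BM_One/4', calling
# filter_benchmark(json, 'BM_Z.ro', '.') keeps only 'BM_Zero/4' and renames
# it to './4'. Renaming both families to a common replacement is what lets
# generate_difference_report below line the two runs up by name.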
def generate_difference_report(json1, json2, use_color=True):
"""
Calculate and report the difference between each test of two benchmarks
runs specified as 'json1' and 'json2'.
"""
first_col_width = find_longest_name(json1['benchmarks'])
def find_test(name):
for b in json2['benchmarks']:
if b['name'] == name:
return b
return None
first_col_width = max(first_col_width, len('Benchmark'))
first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
for bn in gen:
other_bench = find_test(bn['name'])
if not other_bench:
continue
if bn['time_unit'] != other_bench['time_unit']:
continue
def get_color(res):
if res > 0.05:
return BC_FAIL
elif res > -0.07:
return BC_WHITE
else:
return BC_CYAN
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
tres = calculate_change(bn['real_time'], other_bench['real_time'])
cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
output_strs += [color_format(use_color, fmt_str,
BC_HEADER, bn['name'], first_col_width,
get_color(tres), tres, get_color(cpures), cpures,
bn['real_time'], other_bench['real_time'],
bn['cpu_time'], other_bench['cpu_time'],
endc=BC_ENDC)]
return output_strs
###############################################################################
# Unit tests
import unittest
class TestReportDifference(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
testOutput1 = os.path.join(testInputs, 'test1_run1.json')
testOutput2 = os.path.join(testInputs, 'test1_run2.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_basic(self):
expect_lines = [
['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(parts, expect_lines[i])
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
def load_result(self):
import json
testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
testOutput = os.path.join(testInputs, 'test2_run.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
def test_basic(self):
expect_lines = [
['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
]
json = self.load_result()
json1 = filter_benchmark(json, "BM_Z.ro", ".")
json2 = filter_benchmark(json, "BM_O.e", ".")
output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(parts, expect_lines[i])
if __name__ == '__main__':
unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/benchmark/tools/gbench/report.py |
from __future__ import print_function
import struct
import sys
import gdb.printing
import gdb.types
class Iterator:
def __iter__(self):
return self
if sys.version_info.major == 2:
def next(self):
return self.__next__()
def children(self):
return self
def escape_bytes(val, l):
return '"' + val.string(encoding='Latin-1', length=l).encode('unicode_escape').decode() + '"'
class SmallStringPrinter:
"""Print an llvm::SmallString object."""
def __init__(self, val):
self.val = val
def to_string(self):
begin = self.val['BeginX']
return escape_bytes(begin.cast(gdb.lookup_type('char').pointer()), self.val['Size'])
class StringRefPrinter:
"""Print an llvm::StringRef object."""
def __init__(self, val):
self.val = val
def to_string(self):
return escape_bytes(self.val['Data'], self.val['Length'])
class SmallVectorPrinter(Iterator):
"""Print an llvm::SmallVector object."""
def __init__(self, val):
self.val = val
t = val.type.template_argument(0).pointer()
self.begin = val['BeginX'].cast(t)
self.size = val['Size']
self.i = 0
def __next__(self):
if self.i == self.size:
raise StopIteration
ret = '[{}]'.format(self.i), (self.begin+self.i).dereference()
self.i += 1
return ret
def to_string(self):
return 'llvm::SmallVector of Size {}, Capacity {}'.format(self.size, self.val['Capacity'])
def display_hint (self):
return 'array'
class ArrayRefPrinter:
"""Print an llvm::ArrayRef object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
if sys.version_info.major == 2:
next = __next__
def __init__(self, val):
self.val = val
def children(self):
data = self.val['Data']
return self._iterator(data, data + self.val['Length'])
def to_string(self):
return 'llvm::ArrayRef of length %d' % (self.val['Length'])
def display_hint (self):
return 'array'
class ExpectedPrinter(Iterator):
"""Print an llvm::Expected object."""
def __init__(self, val):
self.val = val
def __next__(self):
val = self.val
if val is None:
raise StopIteration
self.val = None
if val['HasError']:
return ('error', val['ErrorStorage'].address.cast(
gdb.lookup_type('llvm::ErrorInfoBase').pointer()).dereference())
return ('value', val['TStorage'].address.cast(
val.type.template_argument(0).pointer()).dereference())
def to_string(self):
return 'llvm::Expected{}'.format(' is error' if self.val['HasError'] else '')
class OptionalPrinter(Iterator):
"""Print an llvm::Optional object."""
def __init__(self, val):
self.val = val
def __next__(self):
val = self.val
if val is None:
raise StopIteration
self.val = None
if not val['Storage']['hasVal']:
raise StopIteration
return ('value', val['Storage']['value'])
def to_string(self):
return 'llvm::Optional{}'.format('' if self.val['Storage']['hasVal'] else ' is not initialized')
class DenseMapPrinter:
"Print a DenseMap"
class _iterator:
def __init__(self, key_info_t, begin, end):
self.key_info_t = key_info_t
self.cur = begin
self.end = end
self.advancePastEmptyBuckets()
self.first = True
def __iter__(self):
return self
def advancePastEmptyBuckets(self):
# disabled until the comments below can be addressed
# keeping as notes/posterity/hints for future contributors
return
n = self.key_info_t.name
is_equal = gdb.parse_and_eval(n + '::isEqual')
empty = gdb.parse_and_eval(n + '::getEmptyKey()')
tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
# the following is invalid, GDB fails with:
# Python Exception <class 'gdb.error'> Attempt to take address of value
# not located in memory.
# because isEqual took its parameter (for the unsigned long key I was
# testing) by const ref, and GDB could not take the address of the
# temporary it needed to pass.
# It's also not entirely general - we should be accessing the "getFirst()"
# member function, not the 'first' member variable, but I've yet to figure
# out how to find/call member functions (especially (const) overloaded
# ones) on a gdb.Value.
while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
self.cur = self.cur + 1
def __next__(self):
if self.cur == self.end:
raise StopIteration
cur = self.cur
v = cur.dereference()['first' if self.first else 'second']
if not self.first:
self.cur = self.cur + 1
self.advancePastEmptyBuckets()
self.first = True
else:
self.first = False
return 'x', v
if sys.version_info.major == 2:
next = __next__
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(3).pointer()
begin = self.val['Buckets'].cast(t)
end = (begin + self.val['NumBuckets']).cast(t)
return self._iterator(self.val.type.template_argument(2), begin, end)
def to_string(self):
return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
def display_hint(self):
return 'map'
class StringMapPrinter:
"Print a StringMap"
def __init__(self, val):
self.val = val
def children(self):
it = self.val['TheTable']
end = (it + self.val['NumBuckets'])
value_ty = self.val.type.template_argument(0)
entry_base_ty = gdb.lookup_type('llvm::StringMapEntryBase')
tombstone = gdb.parse_and_eval('llvm::StringMapImpl::TombstoneIntVal')
while it != end:
it_deref = it.dereference()
if it_deref == 0 or it_deref == tombstone:
it = it + 1
continue
entry_ptr = it_deref.cast(entry_base_ty.pointer())
entry = entry_ptr.dereference()
str_len = entry['keyLength']
value_ptr = (entry_ptr + 1).cast(value_ty.pointer())
str_data = (entry_ptr + 1).cast(gdb.lookup_type('uintptr_t')) + max(value_ty.sizeof, entry_base_ty.alignof)
str_data = str_data.cast(gdb.lookup_type('char').const().pointer())
string_ref = gdb.Value(struct.pack('PN', int(str_data), int(str_len)), gdb.lookup_type('llvm::StringRef'))
yield 'key', string_ref
value = value_ptr.dereference()
yield 'value', value
it = it + 1
def to_string(self):
return 'llvm::StringMap with %d elements' % (self.val['NumItems'])
def display_hint(self):
return 'map'
class TwinePrinter:
"Print a Twine"
def __init__(self, val):
self._val = val
def display_hint(self):
return 'string'
def string_from_pretty_printer_lookup(self, val):
'''Lookup the default pretty-printer for val and use it.
If no pretty-printer is defined for the type of val, print an error and
return a placeholder string.'''
pp = gdb.default_visualizer(val)
if pp:
s = pp.to_string()
# The pretty-printer may return a LazyString instead of an actual Python
# string. Convert it to a Python string. However, GDB doesn't seem to
# register the LazyString type, so we can't check
# "type(s) == gdb.LazyString".
if 'LazyString' in type(s).__name__:
s = s.value().address.string()
else:
print(('No pretty printer for {} found. The resulting Twine ' +
'representation will be incomplete.').format(val.type.name))
s = '(missing {})'.format(val.type.name)
return s
def is_twine_kind(self, kind, expected):
if not kind.endswith(expected):
return False
# apparently some GDB versions add the NodeKind:: namespace
# (happens for me on GDB 7.11)
return kind in ('llvm::Twine::' + expected,
'llvm::Twine::NodeKind::' + expected)
def string_from_child(self, child, kind):
'''Return the string representation of the Twine::Child child.'''
if self.is_twine_kind(kind, 'EmptyKind') or self.is_twine_kind(kind, 'NullKind'):
return ''
if self.is_twine_kind(kind, 'TwineKind'):
return self.string_from_twine_object(child['twine'].dereference())
if self.is_twine_kind(kind, 'CStringKind'):
return child['cString'].string()
if self.is_twine_kind(kind, 'StdStringKind'):
val = child['stdString'].dereference()
return self.string_from_pretty_printer_lookup(val)
if self.is_twine_kind(kind, 'StringRefKind'):
val = child['stringRef'].dereference()
pp = StringRefPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'SmallStringKind'):
val = child['smallString'].dereference()
pp = SmallStringPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'CharKind'):
return chr(child['character'])
if self.is_twine_kind(kind, 'DecUIKind'):
return str(child['decUI'])
if self.is_twine_kind(kind, 'DecIKind'):
return str(child['decI'])
if self.is_twine_kind(kind, 'DecULKind'):
return str(child['decUL'].dereference())
if self.is_twine_kind(kind, 'DecLKind'):
return str(child['decL'].dereference())
if self.is_twine_kind(kind, 'DecULLKind'):
return str(child['decULL'].dereference())
if self.is_twine_kind(kind, 'DecLLKind'):
return str(child['decLL'].dereference())
if self.is_twine_kind(kind, 'UHexKind'):
val = child['uHex'].dereference()
return hex(int(val))
print(('Unhandled NodeKind {} in Twine pretty-printer. The result will be '
'incomplete.').format(kind))
return '(unhandled {})'.format(kind)
def string_from_twine_object(self, twine):
'''Return the string representation of the Twine object twine.'''
lhs_str = ''
rhs_str = ''
lhs = twine['LHS']
rhs = twine['RHS']
lhs_kind = str(twine['LHSKind'])
rhs_kind = str(twine['RHSKind'])
lhs_str = self.string_from_child(lhs, lhs_kind)
rhs_str = self.string_from_child(rhs, rhs_kind)
return lhs_str + rhs_str
def to_string(self):
return self.string_from_twine_object(self._val)
def get_pointer_int_pair(val):
"""Get tuple from llvm::PointerIntPair."""
info_name = val.type.template_argument(4).strip_typedefs().name
# Note: this throws a gdb.error if the info type is not used (by means of a
# call to getPointer() or similar) in the current translation unit.
enum_type = gdb.lookup_type(info_name + '::MaskAndShiftConstants')
enum_dict = gdb.types.make_enum_dict(enum_type)
ptr_mask = enum_dict[info_name + '::PointerBitMask']
int_shift = enum_dict[info_name + '::IntShift']
int_mask = enum_dict[info_name + '::IntMask']
pair_union = val['Value']
pointer = (pair_union & ptr_mask)
value = ((pair_union >> int_shift) & int_mask)
return (pointer, value)
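# Sketch: for an llvm::PointerIntPair<T *, 1, bool> value V, the helper
# above returns (V.Value & PointerBitMask, (V.Value >> IntShift) & IntMask),
# i.e. the tagged word split back into its pointer and integer halves.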
class PointerIntPairPrinter:
"""Print a PointerIntPair."""
def __init__(self, pointer, value):
self.pointer = pointer
self.value = value
def children(self):
yield ('pointer', self.pointer)
yield ('value', self.value)
def make_pointer_int_pair_printer(val):
"""Factory for an llvm::PointerIntPair printer."""
try:
pointer, value = get_pointer_int_pair(val)
except gdb.error:
return None # If PointerIntPair cannot be analyzed, print as raw value.
pointer_type = val.type.template_argument(0)
value_type = val.type.template_argument(2)
return PointerIntPairPrinter(pointer.cast(pointer_type),
value.cast(value_type))
class PointerUnionPrinter:
"""Print a PointerUnion."""
def __init__(self, pointer):
self.pointer = pointer
def children(self):
yield ('pointer', self.pointer)
def to_string(self):
return "Containing %s" % self.pointer.type
def make_pointer_union_printer(val):
"""Factory for an llvm::PointerUnion printer."""
try:
pointer, value = get_pointer_int_pair(val['Val'])
except gdb.error:
return None # If PointerIntPair cannot be analyzed, print as raw value.
pointer_type = val.type.template_argument(int(value))
return PointerUnionPrinter(pointer.cast(pointer_type))
class IlistNodePrinter:
"""Print an llvm::ilist_node object."""
def __init__(self, val):
impl_type = val.type.fields()[0].type
base_type = impl_type.fields()[0].type
derived_type = val.type.template_argument(0)
def get_prev_and_sentinel(base):
# One of Prev and PrevAndSentinel exists. Depending on #defines used to
# compile LLVM, the base_type's template argument is either true or false.
if base_type.template_argument(0):
return get_pointer_int_pair(base['PrevAndSentinel'])
return base['Prev'], None
# Casts a base_type pointer to the appropriate derived type.
def cast_pointer(pointer):
sentinel = get_prev_and_sentinel(pointer.dereference())[1]
pointer = pointer.cast(impl_type.pointer())
if sentinel:
return pointer
return pointer.cast(derived_type.pointer())
# Repeated cast because val.type's base_type is ambiguous when using tags.
base = val.cast(impl_type).cast(base_type)
(prev, sentinel) = get_prev_and_sentinel(base)
prev = prev.cast(base_type.pointer())
self.prev = cast_pointer(prev)
self.next = cast_pointer(val['Next'])
self.sentinel = sentinel
def children(self):
if self.sentinel:
yield 'sentinel', 'yes'
yield 'prev', self.prev
yield 'next', self.next
class IlistPrinter:
"""Print an llvm::simple_ilist or llvm::iplist object."""
def __init__(self, val):
self.node_type = val.type.template_argument(0)
sentinel = val['Sentinel']
# First field is common base type of sentinel and ilist_node.
base_type = sentinel.type.fields()[0].type
self.sentinel = sentinel.address.cast(base_type.pointer())
def _pointers(self):
pointer = self.sentinel
while True:
pointer = pointer['Next'].cast(pointer.type)
if pointer == self.sentinel:
return
yield pointer.cast(self.node_type.pointer())
def children(self):
for k, v in enumerate(self._pointers()):
yield ('[%d]' % k, v.dereference())
pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
pp.add_printer('llvm::ArrayRef', '^llvm::(Mutable)?ArrayRef<.*>$', ArrayRefPrinter)
pp.add_printer('llvm::Expected', '^llvm::Expected<.*>$', ExpectedPrinter)
pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
pp.add_printer('llvm::StringMap', '^llvm::StringMap<.*>$', StringMapPrinter)
pp.add_printer('llvm::Twine', '^llvm::Twine$', TwinePrinter)
pp.add_printer('llvm::PointerIntPair', '^llvm::PointerIntPair<.*>$', make_pointer_int_pair_printer)
pp.add_printer('llvm::PointerUnion', '^llvm::PointerUnion<.*>$', make_pointer_union_printer)
pp.add_printer('llvm::ilist_node', '^llvm::ilist_node<.*>$', IlistNodePrinter)
pp.add_printer('llvm::iplist', '^llvm::iplist<.*>$', IlistPrinter)
pp.add_printer('llvm::simple_ilist', '^llvm::simple_ilist<.*>$', IlistPrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
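# Illustrative hookup from a debug session (the path is hypothetical):
#   (gdb) source /path/to/prettyprinters.py
# After sourcing, printing e.g. an llvm::SmallVector value renders through
# SmallVectorPrinter instead of dumping raw members.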
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/gdb-scripts/prettyprinters.py |
#!/usr/bin/env python
#
# This is a tool that works like debug location coverage calculator.
# It parses the llvm-dwarfdump --statistics output by reporting it
# in a more human readable way.
#
from __future__ import print_function
import argparse
import os
import sys
from json import loads
from math import ceil
from collections import OrderedDict
from subprocess import Popen, PIPE
# Initialize the plot.
def init_plot(plt):
plt.title('Debug Location Statistics', fontweight='bold')
plt.xlabel('location buckets')
plt.ylabel('number of variables in the location buckets')
plt.xticks(rotation=45, fontsize='x-small')
plt.yticks()
# Finalize the plot.
def finish_plot(plt):
plt.legend()
plt.grid(color='grey', which='major', axis='y', linestyle='-', linewidth=0.3)
plt.savefig('locstats.png')
print('The plot was saved within "locstats.png".')
# Holds the debug location statistics.
class LocationStats:
def __init__(self, file_name, variables_total, variables_total_locstats,
variables_with_loc, variables_scope_bytes_covered, variables_scope_bytes,
variables_coverage_map):
self.file_name = file_name
self.variables_total = variables_total
self.variables_total_locstats = variables_total_locstats
self.variables_with_loc = variables_with_loc
self.scope_bytes_covered = variables_scope_bytes_covered
self.scope_bytes = variables_scope_bytes
self.variables_coverage_map = variables_coverage_map
# Get the PC ranges coverage.
def get_pc_coverage(self):
pc_ranges_covered = int(ceil(self.scope_bytes_covered * 100.0 \
/ self.scope_bytes))
return pc_ranges_covered
# Pretty print the debug location buckets.
def pretty_print(self):
if self.scope_bytes == 0:
print ('No scope bytes found.')
return -1
pc_ranges_covered = self.get_pc_coverage()
variables_coverage_per_map = {}
for cov_bucket in coverage_buckets():
variables_coverage_per_map[cov_bucket] = \
int(ceil(self.variables_coverage_map[cov_bucket] * 100.0 \
/ self.variables_total_locstats))
print (' =================================================')
print (' Debug Location Statistics ')
print (' =================================================')
print (' cov% samples percentage(~) ')
print (' -------------------------------------------------')
for cov_bucket in coverage_buckets():
print (' {0:10} {1:8d} {2:3d}%'. \
format(cov_bucket, self.variables_coverage_map[cov_bucket], \
variables_coverage_per_map[cov_bucket]))
print (' =================================================')
print (' -the number of debug variables processed: ' \
+ str(self.variables_total_locstats))
print (' -PC ranges covered: ' + str(pc_ranges_covered) + '%')
# Only if we are processing all the variables output the total
# availability.
if self.variables_total and self.variables_with_loc:
total_availability = int(ceil(self.variables_with_loc * 100.0 \
/ self.variables_total))
print (' -------------------------------------------------')
print (' -total availability: ' + str(total_availability) + '%')
print (' =================================================')
return 0
# Draw a plot representing the location buckets.
def draw_plot(self):
from matplotlib import pyplot as plt
buckets = range(len(self.variables_coverage_map))
plt.figure(figsize=(12, 8))
init_plot(plt)
plt.bar(buckets, self.variables_coverage_map.values(), align='center',
tick_label=self.variables_coverage_map.keys(),
label='variables of {}'.format(self.file_name))
# Place the text box with the coverage info.
pc_ranges_covered = self.get_pc_coverage()
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
plt.text(0.02, 0.90, 'PC ranges covered: {}%'.format(pc_ranges_covered),
transform=plt.gca().transAxes, fontsize=12,
verticalalignment='top', bbox=props)
finish_plot(plt)
# Compare the two LocationStats objects and draw a plot showing
# the difference.
def draw_location_diff(self, locstats_to_compare):
from matplotlib import pyplot as plt
pc_ranges_covered = self.get_pc_coverage()
pc_ranges_covered_to_compare = locstats_to_compare.get_pc_coverage()
buckets = range(len(self.variables_coverage_map))
buckets_to_compare = range(len(locstats_to_compare.variables_coverage_map))
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
init_plot(plt)
comparison_keys = list(coverage_buckets())
ax.bar(buckets, self.variables_coverage_map.values(), align='edge',
width=0.4,
label='variables of {}'.format(self.file_name))
ax.bar(buckets_to_compare,
locstats_to_compare.variables_coverage_map.values(),
color='r', align='edge', width=-0.4,
label='variables of {}'.format(locstats_to_compare.file_name))
ax.set_xticks(range(len(comparison_keys)))
ax.set_xticklabels(comparison_keys)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
plt.text(0.02, 0.88,
'{} PC ranges covered: {}%'. \
format(self.file_name, pc_ranges_covered),
transform=plt.gca().transAxes, fontsize=12,
verticalalignment='top', bbox=props)
plt.text(0.02, 0.83,
'{} PC ranges covered: {}%'. \
format(locstats_to_compare.file_name,
pc_ranges_covered_to_compare),
transform=plt.gca().transAxes, fontsize=12,
verticalalignment='top', bbox=props)
finish_plot(plt)
# Define the location buckets.
def coverage_buckets():
yield '0%'
yield '(0%,10%)'
for start in range(10, 91, 10):
yield '[{0}%,{1}%)'.format(start, start + 10)
yield '100%'
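# The generator above yields, in order:
#   '0%', '(0%,10%)', '[10%,20%)', ..., '[90%,100%)', '100%'
# i.e. twelve buckets, matching the bucket names embedded in the JSON keys
# parsed below.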
# Parse the JSON representing the debug statistics, and create a
# LocationStats object.
def parse_locstats(opts, binary):
# These will be different due to different options enabled.
variables_total = None
variables_total_locstats = None
variables_with_loc = None
variables_scope_bytes_covered = None
variables_scope_bytes = None
variables_scope_bytes_entry_values = None
variables_coverage_map = OrderedDict()
# Get the directory of the LLVM tools.
llvm_dwarfdump_cmd = os.path.join(os.path.dirname(__file__), \
"llvm-dwarfdump")
# The statistics llvm-dwarfdump option.
llvm_dwarfdump_stats_opt = "--statistics"
# Generate the stats with the llvm-dwarfdump.
subproc = Popen([llvm_dwarfdump_cmd, llvm_dwarfdump_stats_opt, binary], \
stdin=PIPE, stdout=PIPE, stderr=PIPE, \
universal_newlines = True)
cmd_stdout, cmd_stderr = subproc.communicate()
# Get the JSON and parse it.
json_parsed = None
try:
json_parsed = loads(cmd_stdout)
except ValueError:
print ('error: No valid llvm-dwarfdump statistics found.')
sys.exit(1)
# TODO: Parse the statistics Version from JSON.
if opts.only_variables:
# Read the JSON only for local variables.
variables_total_locstats = \
json_parsed['#local vars processed by location statistics']
variables_scope_bytes_covered = \
json_parsed['sum_all_local_vars(#bytes in parent scope covered' \
' by DW_AT_location)']
variables_scope_bytes = \
json_parsed['sum_all_local_vars(#bytes in parent scope)']
if not opts.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "#local vars with {} of parent scope covered " \
"by DW_AT_location".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['sum_all_local_vars(#bytes in parent scope ' \
'covered by DW_OP_entry_value)']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = \
"#local vars - entry values with {} of parent scope " \
"covered by DW_AT_location".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
elif opts.only_formal_parameters:
# Read the JSON only for formal parameters.
variables_total_locstats = \
json_parsed['#params processed by location statistics']
variables_scope_bytes_covered = \
json_parsed['sum_all_params(#bytes in parent scope covered ' \
'by DW_AT_location)']
variables_scope_bytes = \
json_parsed['sum_all_params(#bytes in parent scope)']
if not opts.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "#params with {} of parent scope covered " \
"by DW_AT_location".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['sum_all_params(#bytes in parent scope covered ' \
'by DW_OP_entry_value)']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = \
"#params - entry values with {} of parent scope covered" \
" by DW_AT_location".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
# Read the JSON for both local variables and formal parameters.
variables_total = \
json_parsed['#source variables']
variables_with_loc = json_parsed['#source variables with location']
variables_total_locstats = \
json_parsed['#variables processed by location statistics']
variables_scope_bytes_covered = \
json_parsed['sum_all_variables(#bytes in parent scope covered ' \
'by DW_AT_location)']
variables_scope_bytes = \
json_parsed['sum_all_variables(#bytes in parent scope)']
if not opts.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "#variables with {} of parent scope covered " \
"by DW_AT_location".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['sum_all_variables(#bytes in parent scope covered ' \
'by DW_OP_entry_value)']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = \
"#variables - entry values with {} of parent scope covered " \
"by DW_AT_location".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
return LocationStats(binary, variables_total, variables_total_locstats,
variables_with_loc, variables_scope_bytes_covered,
variables_scope_bytes, variables_coverage_map)
# Parse the program arguments.
def parse_program_args(parser):
parser.add_argument('--only-variables', action='store_true', default=False,
help='calculate the location statistics only for local variables')
parser.add_argument('--only-formal-parameters', action='store_true',
default=False,
help='calculate the location statistics only for formal parameters')
parser.add_argument('--ignore-debug-entry-values', action='store_true',
default=False,
help='ignore the location statistics on locations with '
'entry values')
parser.add_argument('--draw-plot', action='store_true', default=False,
help='show histogram of location buckets generated (requires '
'matplotlib)')
parser.add_argument('--compare', action='store_true', default=False,
help='compare the debug location coverage on two files provided, '
'and draw a plot showing the difference (requires '
'matplotlib)')
parser.add_argument('file_names', nargs='+', type=str, help='file to process')
return parser.parse_args()
# Verify that the program inputs meet the requirements.
def verify_program_inputs(opts):
if len(sys.argv) < 2:
print ('error: Too few arguments.')
return False
if opts.only_variables and opts.only_formal_parameters:
print ('error: Please use just one --only* option.')
return False
if not opts.compare and len(opts.file_names) != 1:
print ('error: Please specify only one file to process.')
return False
if opts.compare and len(opts.file_names) != 2:
print ('error: Please specify two files to process.')
return False
if opts.draw_plot or opts.compare:
try:
import matplotlib
except ImportError:
print('error: matplotlib not found.')
return False
return True
def Main():
parser = argparse.ArgumentParser()
opts = parse_program_args(parser)
if not verify_program_inputs(opts):
parser.print_help()
sys.exit(1)
binary_file = opts.file_names[0]
locstats = parse_locstats(opts, binary_file)
if not opts.compare:
if opts.draw_plot:
# Draw a histogram representing the location buckets.
locstats.draw_plot()
else:
# Pretty print collected info on the standard output.
if locstats.pretty_print() == -1:
sys.exit(0)
else:
binary_file_to_compare = opts.file_names[1]
locstats_to_compare = parse_locstats(opts, binary_file_to_compare)
# Draw a plot showing the difference in debug location coverage between
# two files.
locstats.draw_location_diff(locstats_to_compare)
if __name__ == '__main__':
Main()
sys.exit(0)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/llvm-locstats/llvm-locstats.py |
#!/usr/bin/env python
#
# Checks files to make sure they conform to LLVM standards which can be applied
# to any programming language: at present, line length and trailing whitespace.
import common_lint
import sys
class GenericCodeLint(common_lint.BaseLint):
MAX_LINE_LENGTH = 80
def RunOnFile(self, filename, lines):
common_lint.VerifyLineLength(filename, lines,
GenericCodeLint.MAX_LINE_LENGTH)
common_lint.VerifyTrailingWhitespace(filename, lines)
def GenericCodeLintMain(filenames):
common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
return 0
if __name__ == '__main__':
sys.exit(GenericCodeLintMain(sys.argv[1:]))
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lint/generic_lint.py |
#!/usr/bin/env python
#
# Checks C++ files to make sure they conform to LLVM standards, as specified in
# http://llvm.org/docs/CodingStandards.html .
#
# TODO: add unittests for the verifier functions:
# http://docs.python.org/library/unittest.html .
from __future__ import print_function
import common_lint
import re
import sys
def VerifyIncludes(filename, lines):
"""Makes sure the #includes are in proper order and no disallows files are
#included.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
"""
lint = []
include_gtest_re = re.compile(r'^#include "gtest/(.*)"')
include_llvm_re = re.compile(r'^#include "llvm/(.*)"')
include_support_re = re.compile(r'^#include "(Support/.*)"')
include_config_re = re.compile(r'^#include "(Config/.*)"')
include_system_re = re.compile(r'^#include <(.*)>')
DISALLOWED_SYSTEM_HEADERS = ['iostream']
line_num = 1
prev_config_header = None
prev_system_header = None
for line in lines:
# TODO: implement private headers
# TODO: implement gtest headers
# TODO: implement top-level llvm/* headers
# TODO: implement llvm/Support/* headers
# Process Config/* headers
config_header = include_config_re.match(line)
if config_header:
curr_config_header = config_header.group(1)
if prev_config_header:
if prev_config_header > curr_config_header:
lint.append((filename, line_num,
'Config headers not in order: "%s" before "%s"' % (
prev_config_header, curr_config_header)))
# Process system headers
system_header = include_system_re.match(line)
if system_header:
curr_system_header = system_header.group(1)
# Is it disallowed?
if curr_system_header in DISALLOWED_SYSTEM_HEADERS:
lint.append((filename, line_num,
'Disallowed system header: <%s>' % curr_system_header))
elif prev_system_header:
# Make sure system headers are alphabetized amongst themselves
if prev_system_header > curr_system_header:
lint.append((filename, line_num,
'System headers not in order: <%s> before <%s>' % (
prev_system_header, curr_system_header)))
prev_system_header = curr_system_header
line_num += 1
return lint
class CppLint(common_lint.BaseLint):
MAX_LINE_LENGTH = 80
def RunOnFile(self, filename, lines):
lint = []
lint.extend(VerifyIncludes(filename, lines))
lint.extend(common_lint.VerifyLineLength(filename, lines,
CppLint.MAX_LINE_LENGTH))
lint.extend(common_lint.VerifyTabs(filename, lines))
lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
return lint
def CppLintMain(filenames):
all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
for lint in all_lint:
print('%s:%d:%s' % (lint[0], lint[1], lint[2]))
return 0
if __name__ == '__main__':
sys.exit(CppLintMain(sys.argv[1:]))
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lint/cpp_lint.py |
#!/usr/bin/env python
#
# Common lint functions applicable to multiple types of files.
from __future__ import print_function
import re
def VerifyLineLength(filename, lines, max_length):
"""Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
line_num = 1
for line in lines:
length = len(line.rstrip('\n'))
if length > max_length:
lint.append((filename, line_num,
'Line exceeds %d chars (%d)' % (max_length, length)))
line_num += 1
return lint
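# Worked example (inputs illustrative):
#   VerifyLineLength('f.cpp', ['ok\n', 'x' * 85 + '\n'], 80)
#   -> [('f.cpp', 2, 'Line exceeds 80 chars (85)')]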
def VerifyTabs(filename, lines):
"""Checks to make sure the file has no tab characters.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(line_number, msg), ...] with any violations
found.
"""
lint = []
tab_re = re.compile(r'\t')
line_num = 1
for line in lines:
# Use search(), not match(): a tab anywhere on the line is a violation.
if tab_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Tab found instead of whitespace'))
line_num += 1
return lint
def VerifyTrailingWhitespace(filename, lines):
"""Checks to make sure the file has no lines with trailing whitespace.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
trailing_whitespace_re = re.compile(r'\s+$')
line_num = 1
for line in lines:
# Use search(), not match(): the offending whitespace sits at the end of
# the line, so an anchored match from column zero would never fire.
if trailing_whitespace_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Trailing whitespace'))
line_num += 1
return lint
class BaseLint:
def RunOnFile(self, filename, lines):
raise Exception('RunOnFile() unimplemented')
def RunLintOverAllFiles(linter, filenames):
"""Runs linter over the contents of all files.
Args:
linter: subclass of BaseLint, implementing RunOnFile()
filenames: list of all files whose contents will be linted
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
for filename in filenames:
# open() raises on failure instead of returning a falsy handle, so guard
# the read with try/except rather than testing the result.
try:
with open(filename, 'r') as f:
lines = f.readlines()
except IOError:
print('Could not open %s' % filename)
continue
lint.extend(linter.RunOnFile(filename, lines))
return lint
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lint/common_lint.py |
#!/usr/bin/env python
""" A small program to compute checksums of LLVM checkout.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import logging
import re
import sys
from argparse import ArgumentParser
from project_tree import *
SVN_DATES_REGEX = re.compile(r"\$(Date|LastChangedDate)[^\$]+\$")
def main():
parser = ArgumentParser()
parser.add_argument(
"-v", "--verbose", action="store_true", help="enable debug logging")
parser.add_argument(
"-c",
"--check",
metavar="reference_file",
help="read checksums from reference_file and " +
"check they match checksums of llvm_path.")
parser.add_argument(
"--partial",
action="store_true",
help="ignore projects from reference_file " +
"that are not checked out in llvm_path.")
parser.add_argument(
"--multi_dir",
action="store_true",
help="indicates llvm_path contains llvm, checked out " +
"into multiple directories, as opposed to a " +
"typical single source tree checkout.")
parser.add_argument("llvm_path")
args = parser.parse_args()
if args.check is not None:
with open(args.check, "r") as f:
reference_checksums = ReadLLVMChecksums(f)
else:
reference_checksums = None
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
llvm_projects = CreateLLVMProjects(not args.multi_dir)
checksums = ComputeLLVMChecksums(args.llvm_path, llvm_projects)
if reference_checksums is None:
WriteLLVMChecksums(checksums, sys.stdout)
sys.exit(0)
if not ValidateChecksums(reference_checksums, checksums, args.partial):
sys.stdout.write("Checksums differ.\nNew checksums:\n")
WriteLLVMChecksums(checksums, sys.stdout)
sys.stdout.write("Reference checksums:\n")
WriteLLVMChecksums(reference_checksums, sys.stdout)
sys.exit(1)
else:
sys.stdout.write("Checksums match.")
def ComputeLLVMChecksums(root_path, projects):
"""Compute checksums for LLVM sources checked out using svn.
Args:
root_path: a directory of llvm checkout.
projects: a list of LLVMProject instances, which describe checkout paths,
relative to root_path.
Returns:
A dict mapping from project name to project checksum.
"""
hash_algo = hashlib.sha256
def collapse_svn_substitutions(contents):
# Replace svn substitutions for $Date$ and $LastChangedDate$.
# Unfortunately, these are locale-specific.
# Use a raw string so \1 is a backreference, not the control character \x01.
return SVN_DATES_REGEX.sub(r"$\1$", contents)
def read_and_collapse_svn_substitutions(file_path):
with open(file_path, "rb") as f:
contents = f.read()
new_contents = collapse_svn_substitutions(contents)
if contents != new_contents:
logging.debug("Replaced svn keyword substitutions in %s", file_path)
logging.debug("\n\tBefore\n%s\n\tAfter\n%s", contents, new_contents)
return new_contents
project_checksums = dict()
# Hash each project.
for proj in projects:
project_root = os.path.join(root_path, proj.relpath)
if not os.path.exists(project_root):
logging.info("Folder %s doesn't exist, skipping project %s", proj.relpath,
proj.name)
continue
files = list()
def add_file_hash(file_path):
if os.path.islink(file_path) and not os.path.exists(file_path):
content = os.readlink(file_path)
else:
content = read_and_collapse_svn_substitutions(file_path)
hasher = hash_algo()
hasher.update(content)
file_digest = hasher.hexdigest()
logging.debug("Checksum %s for file %s", file_digest, file_path)
files.append((file_path, file_digest))
logging.info("Computing checksum for %s", proj.name)
WalkProjectFiles(root_path, projects, proj, add_file_hash)
# Compute final checksum.
files.sort(key=lambda x: x[0])
hasher = hash_algo()
for file_path, file_digest in files:
file_path = os.path.relpath(file_path, project_root)
hasher.update(file_path)
hasher.update(file_digest)
project_checksums[proj.name] = hasher.hexdigest()
return project_checksums
def WriteLLVMChecksums(checksums, f):
"""Writes checksums to a text file.
Args:
checksums: a dict mapping from project name to project checksum (result of
ComputeLLVMChecksums).
f: a file object to write into.
"""
for proj in sorted(checksums.keys()):
f.write("{} {}\n".format(checksums[proj], proj))
def ReadLLVMChecksums(f):
"""Reads checksums from a text file, produced by WriteLLVMChecksums.
Returns:
A dict, mapping from project name to project checksum.
"""
checksums = {}
while True:
line = f.readline()
if line == "":
break
checksum, proj = line.split()
checksums[proj] = checksum
return checksums
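# Sketch of the one-line-per-project format the two helpers above agree on
# (the hashes shown are made up):
#   9f86d081884c7d65...  clang
#   e3b0c44298fc1c14...  llvm
# WriteLLVMChecksums emits '<checksum> <project>' rows sorted by project
# name; ReadLLVMChecksums splits each row back into that pair.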
def ValidateChecksums(reference_checksums,
new_checksums,
allow_missing_projects=False):
"""Validates that reference_checksums and new_checksums match.
Args:
reference_checksums: a dict of reference checksums, mapping from a project
name to a project checksum.
new_checksums: a dict of checksums to be checked, mapping from a project
name to a project checksum.
allow_missing_projects:
When True, reference_checksums may contain more projects than
new_checksums. Projects missing from new_checksums are ignored.
When False, new_checksums and reference_checksums must contain checksums
for the same set of projects. If there is a project in
reference_checksums, missing from new_checksums, ValidateChecksums
will return False.
Returns:
True, if checksums match with regards to allow_missing_projects flag value.
False, otherwise.
"""
if not allow_missing_projects:
if len(new_checksums) != len(reference_checksums):
return False
for proj, checksum in new_checksums.items():
# We never computed a checksum for this project.
if proj not in reference_checksums:
return False
# Checksum did not match.
if reference_checksums[proj] != checksum:
return False
return True
if __name__ == "__main__":
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/docker/scripts/llvm_checksum/llvm_checksum.py |
"""Contains helper functions to compute checksums for LLVM checkouts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import os.path
import sys
class LLVMProject(object):
"""An LLVM project with a descriptive name and a relative checkout path.
"""
def __init__(self, name, relpath):
self.name = name
self.relpath = relpath
def is_subproject(self, other_project):
""" Check if self is checked out as a subdirectory of other_project.
"""
return self.relpath.startswith(other_project.relpath)
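# Sketch: with the single-tree layout built by CreateLLVMProjects below,
#   LLVMProject('clang-tools-extra', 'tools/clang/tools/extra') \
#       .is_subproject(LLVMProject('clang', 'tools/clang'))  # -> True
# because the first relpath starts with the second.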
def WalkProjectFiles(checkout_root, all_projects, project, visitor):
""" Walk over all files inside a project without recursing into subprojects, '.git' and '.svn' subfolders.
checkout_root: root of the LLVM checkout.
all_projects: projects in the LLVM checkout.
project: a project to walk the files of. Must be inside all_projects.
visitor: a function called on each visited file.
"""
assert project in all_projects
ignored_paths = set()
for other_project in all_projects:
if other_project != project and other_project.is_subproject(project):
ignored_paths.add(os.path.join(checkout_root, other_project.relpath))
def raise_error(err):
raise err
project_root = os.path.join(checkout_root, project.relpath)
for root, dirs, files in os.walk(project_root, onerror=raise_error):
dirs[:] = [
d for d in dirs
if d != ".svn" and d != ".git" and
os.path.join(root, d) not in ignored_paths
]
for f in files:
visitor(os.path.join(root, f))
def CreateLLVMProjects(single_tree_checkout):
"""Returns a list of LLVMProject instances, describing relative paths of a typical LLVM checkout.
Args:
single_tree_checkout:
When True, relative paths for each project points to a typical single
source tree checkout.
When False, relative paths for each projects points to a separate
directory. However, clang-tools-extra is an exception, its relative path
will always be 'clang/tools/extra'.
"""
# FIXME: cover all of llvm projects.
# Projects that reside inside 'projects/' in a single source tree checkout.
ORDINARY_PROJECTS = [
"compiler-rt", "dragonegg", "libcxx", "libcxxabi", "libunwind",
"parallel-libs", "test-suite"
]
# Projects that reside inside 'tools/' in a single source tree checkout.
TOOLS_PROJECTS = ["clang", "lld", "lldb"]
if single_tree_checkout:
projects = [LLVMProject("llvm", "")]
projects += [
LLVMProject(p, os.path.join("projects", p)) for p in ORDINARY_PROJECTS
]
projects += [
LLVMProject(p, os.path.join("tools", p)) for p in TOOLS_PROJECTS
]
projects.append(
LLVMProject("clang-tools-extra",
os.path.join("tools", "clang", "tools", "extra")))
else:
projects = [LLVMProject("llvm", "llvm")]
projects += [LLVMProject(p, p) for p in ORDINARY_PROJECTS]
projects += [LLVMProject(p, p) for p in TOOLS_PROJECTS]
projects.append(
LLVMProject("clang-tools-extra", os.path.join("clang", "tools",
"extra")))
return projects
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/docker/scripts/llvm_checksum/project_tree.py |
#!/usr/bin/env python3
from lit.main import main
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit.py |
import lit
import os
from setuptools import setup, find_packages
# setuptools expects to be invoked from within the directory of setup.py, but it
# is nice to allow:
# python path/to/setup.py install
# to work (for scripts, etc.)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name = "lit",
version = lit.__version__,
author = lit.__author__,
author_email = lit.__email__,
url = 'http://llvm.org',
license = 'Apache-2.0 with LLVM exception',
license_files = ['LICENSE.TXT'],
description = "A Software Testing Tool",
keywords = 'test C++ automatic discovery',
long_description = """\
*lit*
+++++
About
=====
*lit* is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. *lit* is
designed to be a lightweight testing tool with as simple a user interface as
possible.
Features
========
* Portable!
* Flexible test discovery.
* Parallel test execution.
* Support for multiple test formats and test suite designs.
Documentation
=============
The official *lit* documentation is in the man page, available online at the LLVM
Command Guide: http://llvm.org/cmds/lit.html.
Source
======
The *lit* source is available as part of LLVM, in the LLVM source repository:
https://github.com/llvm/llvm-project/tree/master/llvm/utils/lit
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
],
zip_safe = False,
packages = find_packages(),
entry_points = {
'console_scripts': [
'lit = lit.main:main',
],
}
)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/setup.py |
from __future__ import absolute_import
import itertools
import lit.util
from lit.ShCommands import Command, GlobItem, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
self.data = data
self.pos = 0
self.end = len(data)
self.win32Escapes = win32Escapes
def eat(self):
c = self.data[self.pos]
self.pos += 1
return c
def look(self):
return self.data[self.pos]
def maybe_eat(self, c):
"""
maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. """
if self.data[self.pos] == c:
self.pos += 1
return True
return False
def lex_arg_fast(self, c):
# Get the leading whitespace free section.
chunk = self.data[self.pos - 1:].split(None, 1)[0]
# If it has special characters, the fast path failed.
if ('|' in chunk or '&' in chunk or
'<' in chunk or '>' in chunk or
"'" in chunk or '"' in chunk or
';' in chunk or '\\' in chunk):
return None
self.pos = self.pos - 1 + len(chunk)
return GlobItem(chunk) if '*' in chunk or '?' in chunk else chunk
def lex_arg_slow(self, c):
if c in "'\"":
str = self.lex_arg_quoted(c)
else:
str = c
unquoted_glob_char = False
quoted_glob_char = False
while self.pos != self.end:
c = self.look()
if c.isspace() or c in "|&;":
break
elif c in '><':
# This is an annoying case; we treat '2>' as a single token so
# we don't have to track whitespace tokens.
                # If the string parsed so far isn't an integer, do the usual thing.
if not str.isdigit():
break
# Otherwise, lex the operator and convert to a redirection
# token.
num = int(str)
tok = self.lex_one_token()
assert isinstance(tok, tuple) and len(tok) == 1
return (tok[0], num)
elif c == '"' or c == "'":
self.eat()
quoted_arg = self.lex_arg_quoted(c)
if '*' in quoted_arg or '?' in quoted_arg:
quoted_glob_char = True
str += quoted_arg
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
if self.pos == self.end:
lit.util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
str += self.eat()
elif c in '*?':
unquoted_glob_char = True
str += self.eat()
else:
str += self.eat()
# If a quote character is present, lex_arg_quoted will remove the quotes
# and append the argument directly. This causes a problem when the
# quoted portion contains a glob character, as the character will no
# longer be treated literally. If glob characters occur *only* inside
# of quotes, then we can handle this by not globbing at all, and if
# glob characters occur *only* outside of quotes, we can still glob just
# fine. But if a glob character occurs both inside and outside of
# quotes this presents a problem. In practice this is such an obscure
# edge case that it doesn't seem worth the added complexity to support.
# By adding an assertion, it means some bot somewhere will catch this
# and flag the user of a non-portable test (which could almost certainly
# be re-written to work correctly without triggering this).
assert not (quoted_glob_char and unquoted_glob_char)
return GlobItem(str) if unquoted_glob_char else str
def lex_arg_quoted(self, delim):
str = ''
while self.pos != self.end:
c = self.eat()
if c == delim:
return str
elif c == '\\' and delim == '"':
# Inside a '"' quoted string, '\\' only escapes the quote
# character and backslash, otherwise it is preserved.
if self.pos == self.end:
lit.util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
c = self.eat()
if c == '"': #
str += '"'
elif c == '\\':
str += '\\'
else:
str += '\\' + c
else:
str += c
lit.util.warning("missing quote character in %r" % self.data)
return str
def lex_arg_checked(self, c):
pos = self.pos
res = self.lex_arg_fast(c)
end = self.pos
self.pos = pos
reference = self.lex_arg_slow(c)
if res is not None:
if res != reference:
raise ValueError("Fast path failure: %r != %r" % (
res, reference))
if self.pos != end:
raise ValueError("Fast path failure: %r != %r" % (
self.pos, end))
return reference
def lex_arg(self, c):
return self.lex_arg_fast(c) or self.lex_arg_slow(c)
def lex_one_token(self):
"""
lex_one_token - Lex a single 'sh' token. """
c = self.eat()
if c == ';':
return (c,)
if c == '|':
if self.maybe_eat('|'):
return ('||',)
return (c,)
if c == '&':
if self.maybe_eat('&'):
return ('&&',)
if self.maybe_eat('>'):
return ('&>',)
return (c,)
if c == '>':
if self.maybe_eat('&'):
return ('>&',)
if self.maybe_eat('>'):
return ('>>',)
return (c,)
if c == '<':
if self.maybe_eat('&'):
return ('<&',)
            if self.maybe_eat('>'):
                return ('<>',)
            if self.maybe_eat('<'):
                return ('<<',)
return (c,)
return self.lex_arg(c)
def lex(self):
while self.pos != self.end:
if self.look().isspace():
self.eat()
else:
yield self.lex_one_token()
###
class ShParser:
def __init__(self, data, win32Escapes = False, pipefail = False):
self.data = data
self.pipefail = pipefail
self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
def lex(self):
for item in self.tokens:
return item
return None
def look(self):
token = self.lex()
if token is not None:
self.tokens = itertools.chain([token], self.tokens)
return token
def parse_command(self):
tok = self.lex()
if not tok:
raise ValueError("empty command!")
if isinstance(tok, tuple):
raise ValueError("syntax error near unexpected token %r" % tok[0])
args = [tok]
redirects = []
        while True:
tok = self.look()
# EOF?
if tok is None:
break
# If this is an argument, just add it to the current command.
if isinstance(tok, (str, GlobItem)):
args.append(self.lex())
continue
# Otherwise see if it is a terminator.
assert isinstance(tok, tuple)
if tok[0] in ('|',';','&','||','&&'):
break
# Otherwise it must be a redirection.
op = self.lex()
arg = self.lex()
if not arg:
raise ValueError("syntax error near token %r" % op[0])
redirects.append((op, arg))
return Command(args, redirects)
def parse_pipeline(self):
negate = False
commands = [self.parse_command()]
while self.look() == ('|',):
self.lex()
commands.append(self.parse_command())
return Pipeline(commands, negate, self.pipefail)
def parse(self):
lhs = self.parse_pipeline()
while self.look():
operator = self.lex()
assert isinstance(operator, tuple) and len(operator) == 1
if not self.look():
raise ValueError(
"missing argument to operator %r" % operator[0])
# FIXME: Operator precedence!!
lhs = Seq(lhs, operator[0], self.parse_pipeline())
return lhs
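# Illustrative usage (editor's sketch, not part of upstream lit): a shell-like
# command line is parsed into the Command/Pipeline/Seq objects imported from
# lit.ShCommands. The reprs below are indicative only:
#
#   >>> from lit.ShUtil import ShParser
#   >>> ShParser('echo hello | FileCheck %s && echo done').parse()
#   Seq(Pipeline([Command(['echo', 'hello'], []),
#                 Command(['FileCheck', '%s'], [])], False, False),
#       '&&',
#       Pipeline([Command(['echo', 'done'], [])], False, False))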
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/ShUtil.py |
"""
The functions in this module are meant to run on a separate worker process.
Exception: in single process mode _execute is called directly.
For efficiency, we copy all data needed to execute all tests into each worker
and store it in global variables. This reduces the cost of each task.
"""
import contextlib
import os
import signal
import time
import traceback
import lit.Test
import lit.util
_lit_config = None
_parallelism_semaphores = None
def initialize(lit_config, parallelism_semaphores):
"""Copy data shared by all test executions into worker processes"""
global _lit_config
global _parallelism_semaphores
_lit_config = lit_config
_parallelism_semaphores = parallelism_semaphores
# We use the following strategy for dealing with Ctrl+C/KeyboardInterrupt in
# subprocesses created by the multiprocessing.Pool.
# https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
signal.signal(signal.SIGINT, signal.SIG_IGN)
def execute(test):
"""Run one test in a multiprocessing.Pool
Side effects in this function and functions it calls are not visible in the
main lit process.
Arguments and results of this function are pickled, so they should be cheap
to copy.
"""
with _get_parallelism_semaphore(test):
result = _execute(test, _lit_config)
test.setResult(result)
return test
# TODO(python3): replace with contextlib.nullcontext
@contextlib.contextmanager
def NopSemaphore():
yield
def _get_parallelism_semaphore(test):
pg = test.config.parallelism_group
if callable(pg):
pg = pg(test)
return _parallelism_semaphores.get(pg, NopSemaphore())
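# Example (editor's sketch): a suite can throttle expensive tests by assigning
# them to a named parallelism group, which resolves to one of the semaphores
# passed to initialize() above. The group name and limit are hypothetical:
#
#   # in a lit.cfg
#   config.parallelism_group = 'gpu'
#   # in the configuration that sets up lit_config
#   lit_config.parallelism_groups['gpu'] = 2   # at most 2 such tests at once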
# Do not inline! Directly used by LitTestCase.py
def _execute(test, lit_config):
start = time.time()
result = _execute_test_handle_errors(test, lit_config)
result.elapsed = time.time() - start
result.start = start
result.pid = os.getpid()
return result
def _execute_test_handle_errors(test, lit_config):
try:
result = test.config.test_format.execute(test, lit_config)
return _adapt_result(result)
except:
if lit_config.debug:
raise
output = 'Exception during script execution:\n'
output += traceback.format_exc()
output += '\n'
return lit.Test.Result(lit.Test.UNRESOLVED, output)
# Support deprecated result from execute() which returned the result
# code and additional output as a tuple.
def _adapt_result(result):
if isinstance(result, lit.Test.Result):
return result
assert isinstance(result, tuple)
code, output = result
return lit.Test.Result(code, output)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/worker.py |
import multiprocessing
import os
import time
import lit.Test
import lit.util
import lit.worker
class MaxFailuresError(Exception):
pass
class TimeoutError(Exception):
pass
class Run(object):
"""A concrete, configured testing run."""
def __init__(self, tests, lit_config, workers, progress_callback,
max_failures, timeout):
self.tests = tests
self.lit_config = lit_config
self.workers = workers
self.progress_callback = progress_callback
self.max_failures = max_failures
self.timeout = timeout
assert workers > 0
def execute(self):
"""
Execute the tests in the run using up to the specified number of
parallel tasks, and inform the caller of each individual result. The
provided tests should be a subset of the tests available in this run
object.
The progress_callback will be invoked for each completed test.
If timeout is non-None, it should be a time in seconds after which to
stop executing tests.
Returns the elapsed testing time.
Upon completion, each test in the run will have its result
computed. Tests which were not actually executed (for any reason) will
be marked SKIPPED.
"""
self.failures = 0
# Larger timeouts (one year, positive infinity) don't work on Windows.
one_week = 7 * 24 * 60 * 60 # days * hours * minutes * seconds
timeout = self.timeout or one_week
deadline = time.time() + timeout
try:
self._execute(deadline)
finally:
skipped = lit.Test.Result(lit.Test.SKIPPED)
for test in self.tests:
if test.result is None:
test.setResult(skipped)
def _execute(self, deadline):
self._increase_process_limit()
semaphores = {k: multiprocessing.BoundedSemaphore(v)
for k, v in self.lit_config.parallelism_groups.items()
if v is not None}
pool = multiprocessing.Pool(self.workers, lit.worker.initialize,
(self.lit_config, semaphores))
async_results = [
pool.apply_async(lit.worker.execute, args=[test],
callback=self.progress_callback)
for test in self.tests]
pool.close()
try:
self._wait_for(async_results, deadline)
except:
pool.terminate()
raise
finally:
pool.join()
def _wait_for(self, async_results, deadline):
timeout = deadline - time.time()
for idx, ar in enumerate(async_results):
try:
test = ar.get(timeout)
except multiprocessing.TimeoutError:
raise TimeoutError()
else:
self._update_test(self.tests[idx], test)
if test.isFailure():
self.failures += 1
if self.failures == self.max_failures:
raise MaxFailuresError()
# Update local test object "in place" from remote test object. This
# ensures that the original test object which is used for printing test
# results reflects the changes.
def _update_test(self, local_test, remote_test):
# Needed for getMissingRequiredFeatures()
local_test.requires = remote_test.requires
local_test.result = remote_test.result
# TODO(yln): interferes with progress bar
# Some tests use threads internally, and at least on Linux each of these
# threads counts toward the current process limit. Try to raise the (soft)
# process limit so that tests don't fail due to resource exhaustion.
def _increase_process_limit(self):
ncpus = lit.util.usable_core_count()
desired_limit = self.workers * ncpus * 2 # the 2 is a safety factor
# Importing the resource module will likely fail on Windows.
try:
import resource
NPROC = resource.RLIMIT_NPROC
soft_limit, hard_limit = resource.getrlimit(NPROC)
desired_limit = min(desired_limit, hard_limit)
if soft_limit < desired_limit:
resource.setrlimit(NPROC, (desired_limit, hard_limit))
self.lit_config.note('Raised process limit from %d to %d' % \
(soft_limit, desired_limit))
except Exception as ex:
# Warn, unless this is Windows, in which case this is expected.
if os.name != 'nt':
self.lit_config.warning('Failed to raise process limit: %s' % ex)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/run.py |
import argparse
import os
import shlex
import sys
import lit.reports
import lit.Test
import lit.util
def parse_args():
parser = argparse.ArgumentParser(prog='lit')
parser.add_argument('test_paths',
nargs='+',
metavar="TEST_PATH",
help='File or path to include in the test suite')
parser.add_argument('--version',
action='version',
version='%(prog)s ' + lit.__version__)
parser.add_argument("-j", "--threads", "--workers",
dest="workers",
metavar="N",
help="Number of workers used for testing",
type=_positive_int,
default=lit.util.usable_core_count())
parser.add_argument("--config-prefix",
dest="configPrefix",
metavar="NAME",
help="Prefix for 'lit' config files")
parser.add_argument("-D", "--param",
dest="user_params",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
action="append",
default=[])
format_group = parser.add_argument_group("Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
format_group.add_argument("-q", "--quiet",
help="Suppress no error output",
action="store_true")
format_group.add_argument("-s", "--succinct",
help="Reduce amount of output."
" Additionally, show a progress bar,"
" unless --no-progress-bar is specified.",
action="store_true")
format_group.add_argument("-v", "--verbose",
dest="showOutput",
help="Show test output for failures",
action="store_true")
format_group.add_argument("-vv", "--echo-all-commands",
dest="echoAllCommands",
action="store_true",
help="Echo all commands as they are executed to stdout. In case of "
"failure, last command shown will be the failing one.")
format_group.add_argument("-a", "--show-all",
dest="showAllOutput",
help="Display all commandlines and output",
action="store_true")
format_group.add_argument("-o", "--output",
type=lit.reports.JsonReport,
help="Write test results to the provided path",
metavar="PATH")
format_group.add_argument("--no-progress-bar",
dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false")
# Note: this does not generate flags for user-defined result codes.
success_codes = [c for c in lit.Test.ResultCode.all_codes()
if not c.isFailure]
for code in success_codes:
format_group.add_argument(
"--show-{}".format(code.name.lower()),
dest="shown_codes",
help="Show {} tests ({})".format(code.label.lower(), code.name),
action="append_const",
const=code,
default=[])
execution_group = parser.add_argument_group("Test Execution")
execution_group.add_argument("--path",
help="Additional paths to add to testing environment",
action="append",
default=[])
execution_group.add_argument("--vg",
dest="useValgrind",
help="Run tests under valgrind",
action="store_true")
execution_group.add_argument("--vg-leak",
dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true")
execution_group.add_argument("--vg-arg",
dest="valgrindArgs",
metavar="ARG",
help="Specify an extra argument for valgrind",
action="append",
default=[])
execution_group.add_argument("--time-tests",
help="Track elapsed wall time for each test",
action="store_true")
execution_group.add_argument("--no-execute",
dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true")
execution_group.add_argument("--xunit-xml-output",
type=lit.reports.XunitReport,
help="Write XUnit-compatible XML test reports to the specified file")
execution_group.add_argument("--time-trace-output",
type=lit.reports.TimeTraceReport,
help="Write Chrome tracing compatible JSON to the specified file")
execution_group.add_argument("--timeout",
dest="maxIndividualTestTime",
help="Maximum time to spend running a single test (in seconds). "
"0 means no time limit. [Default: 0]",
type=_non_negative_int)
execution_group.add_argument("--max-failures",
help="Stop execution after the given number of failures.",
type=_positive_int)
execution_group.add_argument("--allow-empty-runs",
help="Do not fail the run if all tests are filtered out",
action="store_true")
execution_group.add_argument("--no-indirectly-run-check",
dest="indirectlyRunCheck",
help="Do not error if a test would not be run if the user had "
"specified the containing directory instead of naming the "
"test directly.",
action="store_false")
selection_group = parser.add_argument_group("Test Selection")
selection_group.add_argument("--max-tests",
metavar="N",
help="Maximum number of tests to run",
type=_positive_int)
selection_group.add_argument("--max-time",
dest="timeout",
metavar="N",
help="Maximum time to spend testing (in seconds)",
type=_positive_int)
selection_group.add_argument("--shuffle",
help="Run tests in random order",
action="store_true")
selection_group.add_argument("-i", "--incremental",
help="Run modified and failing tests first (updates mtimes)",
action="store_true")
selection_group.add_argument("--filter",
metavar="REGEX",
type=_case_insensitive_regex,
help="Only run tests with paths matching the given regular expression",
default=os.environ.get("LIT_FILTER", ".*"))
selection_group.add_argument("--num-shards",
dest="numShards",
metavar="M",
help="Split testsuite into M pieces and only run one",
type=_positive_int,
default=os.environ.get("LIT_NUM_SHARDS"))
selection_group.add_argument("--run-shard",
dest="runShard",
metavar="N",
help="Run shard #N of the testsuite",
type=_positive_int,
default=os.environ.get("LIT_RUN_SHARD"))
debug_group = parser.add_argument_group("Debug and Experimental Options")
debug_group.add_argument("--debug",
help="Enable debugging (for 'lit' development)",
action="store_true")
debug_group.add_argument("--show-suites",
help="Show discovered test suites and exit",
action="store_true")
debug_group.add_argument("--show-tests",
help="Show all discovered tests and exit",
action="store_true")
debug_group.add_argument("--show-used-features",
help="Show all features used in the test suite (in XFAIL, UNSUPPORTED and REQUIRES) and exit",
action="store_true")
# LIT is special: environment variables override command line arguments.
env_args = shlex.split(os.environ.get("LIT_OPTS", ""))
args = sys.argv[1:] + env_args
opts = parser.parse_args(args)
# Validate command line options
if opts.echoAllCommands:
opts.showOutput = True
# TODO(python3): Could be enum
if opts.shuffle:
opts.order = 'random'
elif opts.incremental:
opts.order = 'failing-first'
else:
opts.order = 'default'
if opts.numShards or opts.runShard:
if not opts.numShards or not opts.runShard:
parser.error("--num-shards and --run-shard must be used together")
if opts.runShard > opts.numShards:
parser.error("--run-shard must be between 1 and --num-shards (inclusive)")
opts.shard = (opts.runShard, opts.numShards)
else:
opts.shard = None
opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.time_trace_output])
return opts
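# Example (editor's sketch): because LIT_OPTS is appended after argv above and
# argparse lets later options win, environment options override the command
# line. Both invocations below run with 8 workers:
#
#   $ export LIT_OPTS="-j8"
#   $ lit -j1 path/to/tests
#   $ lit -j1 --max-time 600 path/to/tests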
def _positive_int(arg):
return _int(arg, 'positive', lambda i: i > 0)
def _non_negative_int(arg):
return _int(arg, 'non-negative', lambda i: i >= 0)
def _int(arg, kind, pred):
desc = "requires {} integer, but found '{}'"
try:
i = int(arg)
except ValueError:
raise _error(desc, kind, arg)
if not pred(i):
raise _error(desc, kind, arg)
return i
def _case_insensitive_regex(arg):
import re
try:
return re.compile(arg, re.IGNORECASE)
except re.error as reason:
raise _error("invalid regular expression: '{}', {}", arg, reason)
def _error(desc, *args):
msg = desc.format(*args)
return argparse.ArgumentTypeError(msg)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/cl_arguments.py |
"""
Test discovery functions.
"""
import copy
import os
import sys
from lit.TestingConfig import TestingConfig
from lit import LitConfig, Test
def chooseConfigFileFromDir(dir, config_names):
for name in config_names:
p = os.path.join(dir, name)
if os.path.exists(p):
return p
return None
def dirContainsTestSuite(path, lit_config):
cfgpath = chooseConfigFileFromDir(path, lit_config.site_config_names)
if not cfgpath:
cfgpath = chooseConfigFileFromDir(path, lit_config.config_names)
return cfgpath
def getTestSuite(item, litConfig, cache):
"""getTestSuite(item, litConfig, cache) -> (suite, relative_path)
Find the test suite containing @arg item.
@retval (None, ...) - Indicates no test suite contains @arg item.
@retval (suite, relative_path) - The suite that @arg item is in, and its
relative path inside that suite.
"""
def search1(path):
# Check for a site config or a lit config.
cfgpath = dirContainsTestSuite(path, litConfig)
# If we didn't find a config file, keep looking.
if not cfgpath:
parent,base = os.path.split(path)
if parent == path:
return (None, ())
ts, relative = search(parent)
return (ts, relative + (base,))
# This is a private builtin parameter which can be used to perform
# translation of configuration paths. Specifically, this parameter
# can be set to a dictionary that the discovery process will consult
# when it finds a configuration it is about to load. If the given
# path is in the map, the value of that key is a path to the
# configuration to load instead.
config_map = litConfig.params.get('config_map')
if config_map:
cfgpath = os.path.realpath(cfgpath)
cfgpath = os.path.normcase(cfgpath)
target = config_map.get(cfgpath)
if target:
cfgpath = target
# We found a test suite, create a new config for it and load it.
if litConfig.debug:
litConfig.note('loading suite config %r' % cfgpath)
cfg = TestingConfig.fromdefaults(litConfig)
cfg.load_from_path(cfgpath, litConfig)
source_root = os.path.realpath(cfg.test_source_root or path)
exec_root = os.path.realpath(cfg.test_exec_root or path)
return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()
def search(path):
# Check for an already instantiated test suite.
real_path = os.path.realpath(path)
res = cache.get(real_path)
if res is None:
cache[real_path] = res = search1(path)
return res
# Canonicalize the path.
item = os.path.normpath(os.path.join(os.getcwd(), item))
# Skip files and virtual components.
components = []
while not os.path.isdir(item):
parent,base = os.path.split(item)
if parent == item:
return (None, ())
components.append(base)
item = parent
components.reverse()
ts, relative = search(item)
return ts, tuple(relative + tuple(components))
def getLocalConfig(ts, path_in_suite, litConfig, cache):
def search1(path_in_suite):
# Get the parent config.
if not path_in_suite:
parent = ts.config
else:
parent = search(path_in_suite[:-1])
# Check if there is a local configuration file.
source_path = ts.getSourcePath(path_in_suite)
cfgpath = chooseConfigFileFromDir(source_path, litConfig.local_config_names)
# If not, just reuse the parent config.
if not cfgpath:
return parent
# Otherwise, copy the current config and load the local configuration
# file into it.
config = copy.deepcopy(parent)
if litConfig.debug:
litConfig.note('loading local config %r' % cfgpath)
config.load_from_path(cfgpath, litConfig)
return config
def search(path_in_suite):
key = (ts, path_in_suite)
res = cache.get(key)
if res is None:
cache[key] = res = search1(path_in_suite)
return res
return search(path_in_suite)
def getTests(path, litConfig, testSuiteCache,
localConfigCache, indirectlyRunCheck):
# Find the test suite for this input and its relative path.
ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
if ts is None:
litConfig.warning('unable to find test suite for %r' % path)
return (),()
if litConfig.debug:
litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
path_in_suite))
return ts, getTestsInSuite(ts, path_in_suite, litConfig,
testSuiteCache, localConfigCache, indirectlyRunCheck)
def getTestsInSuite(ts, path_in_suite, litConfig,
testSuiteCache, localConfigCache, indirectlyRunCheck):
# Check that the source path exists (errors here are reported by the
# caller).
source_path = ts.getSourcePath(path_in_suite)
if not os.path.exists(source_path):
return
# Check if the user named a test directly.
if not os.path.isdir(source_path):
test_dir_in_suite = path_in_suite[:-1]
lc = getLocalConfig(ts, test_dir_in_suite, litConfig, localConfigCache)
test = Test.Test(ts, path_in_suite, lc)
        # Issue an error if the specified test would not be run if
        # the user had specified the containing directory instead
        # of naming the test directly. This helps to avoid writing
# tests which are not executed. The check adds some performance
# overhead which might be important if a large number of tests
# are being run directly.
# --no-indirectly-run-check: skips this check.
if indirectlyRunCheck and lc.test_format is not None:
found = False
for res in lc.test_format.getTestsInDirectory(ts, test_dir_in_suite,
litConfig, lc):
if test.getFullName() == res.getFullName():
found = True
break
if not found:
litConfig.error(
'%r would not be run indirectly: change name or LIT config'
% test.getFullName())
yield test
return
# Otherwise we have a directory to search for tests, start by getting the
# local configuration.
lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)
# Search for tests.
if lc.test_format is not None:
for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
litConfig, lc):
yield res
# Search subdirectories.
for filename in os.listdir(source_path):
# FIXME: This doesn't belong here?
if filename in ('Output', '.svn', '.git') or filename in lc.excludes:
continue
# Ignore non-directories.
file_sourcepath = os.path.join(source_path, filename)
if not os.path.isdir(file_sourcepath):
continue
# Check for nested test suites, first in the execpath in case there is a
# site configuration and then in the source path.
subpath = path_in_suite + (filename,)
file_execpath = ts.getExecPath(subpath)
if dirContainsTestSuite(file_execpath, litConfig):
sub_ts, subpath_in_suite = getTestSuite(file_execpath, litConfig,
testSuiteCache)
elif dirContainsTestSuite(file_sourcepath, litConfig):
sub_ts, subpath_in_suite = getTestSuite(file_sourcepath, litConfig,
testSuiteCache)
else:
sub_ts = None
        # If this directory recursively maps back to the current test suite,
# disregard it (this can happen if the exec root is located inside the
# current test suite, for example).
if sub_ts is ts:
continue
# Otherwise, load from the nested test suite, if present.
if sub_ts is not None:
subiter = getTestsInSuite(sub_ts, subpath_in_suite, litConfig,
testSuiteCache, localConfigCache,
indirectlyRunCheck)
else:
subiter = getTestsInSuite(ts, subpath, litConfig, testSuiteCache,
localConfigCache, indirectlyRunCheck)
N = 0
for res in subiter:
N += 1
yield res
if sub_ts and not N:
litConfig.warning('test suite %r contained no tests' % sub_ts.name)
def find_tests_for_inputs(lit_config, inputs, indirectlyRunCheck):
"""
find_tests_for_inputs(lit_config, inputs) -> [Test]
Given a configuration object and a list of input specifiers, find all the
tests to execute.
"""
# Expand '@...' form in inputs.
actual_inputs = []
for input in inputs:
if input.startswith('@'):
f = open(input[1:])
try:
for ln in f:
ln = ln.strip()
if ln:
actual_inputs.append(ln)
finally:
f.close()
else:
actual_inputs.append(input)
# Load the tests from the inputs.
tests = []
test_suite_cache = {}
local_config_cache = {}
for input in actual_inputs:
prev = len(tests)
tests.extend(getTests(input, lit_config, test_suite_cache,
local_config_cache, indirectlyRunCheck)[1])
if prev == len(tests):
lit_config.warning('input %r contained no tests' % input)
# If there were any errors during test discovery, exit now.
if lit_config.numErrors:
sys.stderr.write('%d errors, exiting.\n' % lit_config.numErrors)
sys.exit(2)
return tests
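# Example (editor's sketch): inputs may be literal paths or '@'-prefixed
# response files holding one test path per line, as expanded above. The file
# name is hypothetical:
#
#   tests = find_tests_for_inputs(lit_config,
#                                 ['llvm/test/Analysis', '@failing.txt'],
#                                 indirectlyRunCheck=True)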
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/discovery.py |
from __future__ import print_function
import errno
import itertools
import math
import numbers
import os
import platform
import signal
import subprocess
import sys
import threading
def is_string(value):
try:
# Python 2 and Python 3 are different here.
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
def pythonize_bool(value):
if value is None:
return False
if type(value) is bool:
return value
if isinstance(value, numbers.Number):
return value != 0
if is_string(value):
if value.lower() in ('1', 'true', 'on', 'yes'):
return True
if value.lower() in ('', '0', 'false', 'off', 'no'):
return False
raise ValueError('"{}" is not a valid boolean'.format(value))
def make_word_regex(word):
return r'\b' + word + r'\b'
def to_bytes(s):
"""Return the parameter as type 'bytes', possibly encoding it.
In Python2, the 'bytes' type is the same as 'str'. In Python3, they
are distinct.
"""
if isinstance(s, bytes):
# In Python2, this branch is taken for both 'str' and 'bytes'.
# In Python3, this branch is taken only for 'bytes'.
return s
# In Python2, 's' is a 'unicode' object.
# In Python3, 's' is a 'str' object.
# Encode to UTF-8 to get 'bytes' data.
return s.encode('utf-8')
def to_string(b):
"""Return the parameter as type 'str', possibly encoding it.
In Python2, the 'str' type is the same as 'bytes'. In Python3, the
'str' type is (essentially) Python2's 'unicode' type, and 'bytes' is
distinct.
"""
if isinstance(b, str):
# In Python2, this branch is taken for types 'str' and 'bytes'.
# In Python3, this branch is taken only for 'str'.
return b
if isinstance(b, bytes):
# In Python2, this branch is never taken ('bytes' is handled as 'str').
# In Python3, this is true only for 'bytes'.
try:
return b.decode('utf-8')
except UnicodeDecodeError:
# If the value is not valid Unicode, return the default
# repr-line encoding.
return str(b)
# By this point, here's what we *don't* have:
#
# - In Python2:
# - 'str' or 'bytes' (1st branch above)
# - In Python3:
# - 'str' (1st branch above)
# - 'bytes' (2nd branch above)
#
# The last type we might expect is the Python2 'unicode' type. There is no
# 'unicode' type in Python3 (all the Python3 cases were already handled). In
# order to get a 'str' object, we need to encode the 'unicode' object.
try:
return b.encode('utf-8')
except AttributeError:
raise TypeError('not sure how to convert %s to %s' % (type(b), str))
def to_unicode(s):
"""Return the parameter as type which supports unicode, possibly decoding
it.
In Python2, this is the unicode type. In Python3 it's the str type.
"""
if isinstance(s, bytes):
# In Python2, this branch is taken for both 'str' and 'bytes'.
# In Python3, this branch is taken only for 'bytes'.
return s.decode('utf-8')
return s
def usable_core_count():
"""Return the number of cores the current process can use, if supported.
Otherwise, return the total number of cores (like `os.cpu_count()`).
Default to 1 if undetermined.
"""
try:
n = len(os.sched_getaffinity(0))
except AttributeError:
n = os.cpu_count() or 1
# On Windows, with more than 32 processes, process creation often fails with
# "Too many open files". FIXME: Check if there's a better fix.
if platform.system() == 'Windows':
return min(n, 32)
return n
def mkdir(path):
try:
if platform.system() == 'Windows':
from ctypes import windll
from ctypes import GetLastError, WinError
path = os.path.abspath(path)
# Make sure that the path uses backslashes here, in case
# python would have happened to use forward slashes, as the
# NT path format only supports backslashes.
path = path.replace('/', '\\')
NTPath = to_unicode(r'\\?\%s' % path)
if not windll.kernel32.CreateDirectoryW(NTPath, None):
raise WinError(GetLastError())
else:
os.mkdir(path)
except OSError:
e = sys.exc_info()[1]
# ignore EEXIST, which may occur during a race condition
if e.errno != errno.EEXIST:
raise
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
mkdir(path)
def listdir_files(dirname, suffixes=None, exclude_filenames=None):
"""Yields files in a directory.
Filenames that are not excluded by rules below are yielded one at a time, as
basenames (i.e., without dirname).
Files starting with '.' are always skipped.
If 'suffixes' is not None, then only filenames ending with one of its
members will be yielded. These can be extensions, like '.exe', or strings,
like 'Test'. (It is a lexicographic check; so an empty sequence will yield
nothing, but a single empty string will yield all filenames.)
If 'exclude_filenames' is not None, then none of the file basenames in it
will be yielded.
If specified, the containers for 'suffixes' and 'exclude_filenames' must
support membership checking for strs.
Args:
dirname: a directory path.
suffixes: (optional) a sequence of strings (set, list, etc.).
exclude_filenames: (optional) a sequence of strings.
Yields:
Filenames as returned by os.listdir (generally, str).
"""
if exclude_filenames is None:
exclude_filenames = set()
if suffixes is None:
suffixes = {''}
for filename in os.listdir(dirname):
if (os.path.isdir(os.path.join(dirname, filename)) or
filename.startswith('.') or
filename in exclude_filenames or
not any(filename.endswith(sfx) for sfx in suffixes)):
continue
yield filename
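# Example (editor's sketch): yield all '.py' and '.cfg' files in a directory,
# skipping one specific basename (the arguments are hypothetical):
#
#   for name in listdir_files('tests', suffixes={'.py', '.cfg'},
#                             exclude_filenames={'setup.py'}):
#       print(name)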
def which(command, paths=None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH', '')
# Check for absolute match first.
if os.path.isabs(command) and os.path.isfile(command):
return os.path.normcase(os.path.normpath(command))
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p) and not os.path.isdir(p):
return os.path.normcase(os.path.normpath(p))
return None
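# Example (editor's sketch, POSIX-style paths):
#
#   clang = which('clang')                       # search the PATH variable
#   clang = which('clang', '/usr/bin:/opt/bin')  # search an explicit list
#   # both return None when the command cannot be found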
def checkToolsPath(dir, tools):
for tool in tools:
if not os.path.exists(os.path.join(dir, tool)):
return False
return True
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def printHistogram(items, title='Items'):
items.sort(key=lambda item: item[1])
maxValue = max([v for _, v in items])
# Select first "nice" bar height that produces more than 10 bars.
power = int(math.ceil(math.log(maxValue, 10)))
for inc in itertools.cycle((5, 2, 2.5, 1)):
barH = inc * 10**power
N = int(math.ceil(maxValue / barH))
if N > 10:
break
elif inc == 1:
power -= 1
histo = [set() for i in range(N)]
for name, v in items:
bin = min(int(N * v / maxValue), N - 1)
histo[bin].add(name)
barW = 40
hr = '-' * (barW + 34)
print('Slowest %s:' % title)
print(hr)
for name, value in reversed(items[-20:]):
print('%.2fs: %s' % (value, name))
print('\n%s Times:' % title)
print(hr)
pDigits = int(math.ceil(math.log(maxValue, 10)))
pfDigits = max(0, 3 - pDigits)
if pfDigits:
pDigits += pfDigits + 1
cDigits = int(math.ceil(math.log(len(items), 10)))
print('[%s] :: [%s] :: [%s]' % ('Range'.center((pDigits + 1) * 2 + 3),
'Percentage'.center(barW),
'Count'.center(cDigits * 2 + 1)))
print(hr)
for i, row in reversed(list(enumerate(histo))):
pct = float(len(row)) / len(items)
w = int(barW * pct)
print('[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]' % (
pDigits, pfDigits, i * barH, pDigits, pfDigits, (i + 1) * barH,
'*' * w, ' ' * (barW - w), cDigits, len(row), cDigits, len(items)))
print(hr)
class ExecuteCommandTimeoutException(Exception):
def __init__(self, msg, out, err, exitCode):
assert isinstance(msg, str)
assert isinstance(out, str)
assert isinstance(err, str)
assert isinstance(exitCode, int)
self.msg = msg
self.out = out
self.err = err
self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
"""Execute command ``command`` (list of arguments or string) with.
* working directory ``cwd`` (str), use None to use the current
working directory
* environment ``env`` (dict), use None for none
    * Input to the command ``input`` (str), use None to pass
      no input.
* Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
Returns a tuple (out, err, exitCode) where
* ``out`` (str) is the standard output of running the command
* ``err`` (str) is the standard error of running the command
* ``exitCode`` (int) is the exitCode of running the command
If the timeout is hit an ``ExecuteCommandTimeoutException``
is raised.
"""
if input is not None:
input = to_bytes(input)
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env, close_fds=kUseCloseFDs)
timerObject = None
# FIXME: Because of the way nested function scopes work in Python 2.x we
# need to use a reference to a mutable object rather than a plain
# bool. In Python 3 we could use the "nonlocal" keyword but we need
# to support Python 2 as well.
hitTimeOut = [False]
try:
if timeout > 0:
def killProcess():
# We may be invoking a shell so we need to kill the
# process and all its children.
hitTimeOut[0] = True
killProcessAndChildren(p.pid)
timerObject = threading.Timer(timeout, killProcess)
timerObject.start()
out, err = p.communicate(input=input)
exitCode = p.wait()
finally:
if timerObject != None:
timerObject.cancel()
# Ensure the resulting output is always of string type.
out = to_string(out)
err = to_string(err)
if hitTimeOut[0]:
raise ExecuteCommandTimeoutException(
msg='Reached timeout of {} seconds'.format(timeout),
out=out,
err=err,
exitCode=exitCode
)
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
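# Example (editor's sketch): run a command with a 5 second limit; on timeout
# the partial output is carried on the exception (killing the process tree
# relies on killProcessAndChildren() below):
#
#   try:
#       out, err, rc = executeCommand(['sleep', '10'], timeout=5)
#   except ExecuteCommandTimeoutException as e:
#       print(e.msg, e.out, e.err, e.exitCode)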
def usePlatformSdkOnDarwin(config, lit_config):
# On Darwin, support relocatable SDKs by providing Clang with a
# default system root path.
if 'darwin' in config.target_triple:
try:
cmd = subprocess.Popen(['xcrun', '--show-sdk-path', '--sdk', 'macosx'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out = out.strip()
res = cmd.wait()
except OSError:
res = -1
if res == 0 and out:
sdk_path = out.decode()
lit_config.note('using SDKROOT: %r' % sdk_path)
config.environment['SDKROOT'] = sdk_path
def findPlatformSdkVersionOnMacOS(config, lit_config):
if 'darwin' in config.target_triple:
try:
cmd = subprocess.Popen(['xcrun', '--show-sdk-version', '--sdk', 'macosx'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out = out.strip()
res = cmd.wait()
except OSError:
res = -1
if res == 0 and out:
return out.decode()
return None
def killProcessAndChildrenIsSupported():
"""
    Returns a tuple (<supported>, <error message>)
    where
        `<supported>` is True if `killProcessAndChildren()` is supported on
        the current host and False otherwise.
        `<error message>` is an empty string if `<supported>` is True;
        otherwise it contains a string describing why the function is
        not supported.
"""
if platform.system() == 'AIX':
return (True, "")
try:
import psutil # noqa: F401
return (True, "")
except ImportError:
return (False, "Requires the Python psutil module but it could"
" not be found. Try installing it via pip or via"
" your operating system's package manager.")
def killProcessAndChildren(pid):
"""This function kills a process with ``pid`` and all its running children
(recursively). It is currently implemented using the psutil module on some
platforms which provides a simple platform neutral implementation.
TODO: Reimplement this without using psutil on all platforms so we can
remove our dependency on it.
"""
if platform.system() == 'AIX':
subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid), shell=True)
else:
import psutil
try:
psutilProc = psutil.Process(pid)
# Handle the different psutil API versions
try:
# psutil >= 2.x
children_iterator = psutilProc.children(recursive=True)
except AttributeError:
# psutil 1.x
children_iterator = psutilProc.get_children(recursive=True)
for child in children_iterator:
try:
child.kill()
except psutil.NoSuchProcess:
pass
psutilProc.kill()
except psutil.NoSuchProcess:
pass
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/util.py |
"""'lit' Testing Tool"""
__author__ = 'Daniel Dunbar'
__email__ = '[email protected]'
__versioninfo__ = (12, 0, 1)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/__init__.py |
import itertools
import os
from json import JSONEncoder
from lit.BooleanExpression import BooleanExpression
# Test result codes.
class ResultCode(object):
"""Test result codes."""
# All result codes (including user-defined ones) in declaration order
_all_codes = []
@staticmethod
def all_codes():
return ResultCode._all_codes
# We override __new__ and __getnewargs__ to ensure that pickling still
# provides unique ResultCode objects in any particular instance.
_instances = {}
def __new__(cls, name, label, isFailure):
res = cls._instances.get(name)
if res is None:
cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
return res
def __getnewargs__(self):
return (self.name, self.label, self.isFailure)
def __init__(self, name, label, isFailure):
self.name = name
self.label = label
self.isFailure = isFailure
ResultCode._all_codes.append(self)
def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.isFailure))
# Successes
EXCLUDED = ResultCode('EXCLUDED', 'Excluded', False)
SKIPPED = ResultCode('SKIPPED', 'Skipped', False)
UNSUPPORTED = ResultCode('UNSUPPORTED', 'Unsupported', False)
PASS = ResultCode('PASS', 'Passed', False)
FLAKYPASS = ResultCode('FLAKYPASS', 'Passed With Retry', False)
XFAIL = ResultCode('XFAIL', 'Expectedly Failed', False)
# Failures
UNRESOLVED = ResultCode('UNRESOLVED', 'Unresolved', True)
TIMEOUT = ResultCode('TIMEOUT', 'Timed Out', True)
FAIL = ResultCode('FAIL', 'Failed', True)
XPASS = ResultCode('XPASS', 'Unexpectedly Passed', True)
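# Example (editor's sketch): a test format may declare additional result
# codes; ResultCode.__init__ above registers them automatically. The name and
# label here are hypothetical:
#
#   NOEXE = ResultCode('NOEXE', 'Compile Failed', True)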
# Test metric values.
class MetricValue(object):
def format(self):
"""
format() -> str
Convert this metric to a string suitable for displaying as part of the
console output.
"""
raise RuntimeError("abstract method")
def todata(self):
"""
todata() -> json-serializable data
Convert this metric to content suitable for serializing in the JSON test
output.
"""
raise RuntimeError("abstract method")
class IntMetricValue(MetricValue):
def __init__(self, value):
self.value = value
def format(self):
return str(self.value)
def todata(self):
return self.value
class RealMetricValue(MetricValue):
def __init__(self, value):
self.value = value
def format(self):
return '%.4f' % self.value
def todata(self):
return self.value
class JSONMetricValue(MetricValue):
"""
JSONMetricValue is used for types that are representable in the output
but that are otherwise uninterpreted.
"""
def __init__(self, value):
# Ensure the value is a serializable by trying to encode it.
# WARNING: The value may change before it is encoded again, and may
# not be encodable after the change.
try:
e = JSONEncoder()
e.encode(value)
except TypeError:
raise
self.value = value
def format(self):
e = JSONEncoder(indent=2, sort_keys=True)
return e.encode(self.value)
def todata(self):
return self.value
def toMetricValue(value):
if isinstance(value, MetricValue):
return value
elif isinstance(value, int):
return IntMetricValue(value)
elif isinstance(value, float):
return RealMetricValue(value)
else:
# 'long' is only present in python2
try:
if isinstance(value, long):
return IntMetricValue(value)
except NameError:
pass
# Try to create a JSONMetricValue and let the constructor throw
# if value is not a valid type.
return JSONMetricValue(value)
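# Example (editor's sketch): toMetricValue() picks the wrapper by Python type,
# and the wrapped values can then be attached to a Result (defined below):
#
#   result = Result(PASS, elapsed=0.42)
#   result.addMetric('compile_time', toMetricValue(1.25))  # RealMetricValue
#   result.addMetric('num_cases', toMetricValue(12))       # IntMetricValue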
# Test results.
class Result(object):
"""Wrapper for the results of executing an individual test."""
def __init__(self, code, output='', elapsed=None):
# The result code.
self.code = code
# The test output.
self.output = output
# The wall timing to execute the test, if timing.
self.elapsed = elapsed
self.start = None
self.pid = None
# The metrics reported by this test.
self.metrics = {}
# The micro-test results reported by this test.
self.microResults = {}
def addMetric(self, name, value):
"""
addMetric(name, value)
Attach a test metric to the test result, with the given name and list of
values. It is an error to attempt to attach the metrics with the same
name multiple times.
Each value must be an instance of a MetricValue subclass.
"""
if name in self.metrics:
raise ValueError("result already includes metrics for %r" % (
name,))
if not isinstance(value, MetricValue):
raise TypeError("unexpected metric value: %r" % (value,))
self.metrics[name] = value
def addMicroResult(self, name, microResult):
"""
addMicroResult(microResult)
Attach a micro-test result to the test result, with the given name and
result. It is an error to attempt to attach a micro-test with the
same name multiple times.
Each micro-test result must be an instance of the Result class.
"""
if name in self.microResults:
raise ValueError("Result already includes microResult for %r" % (
name,))
if not isinstance(microResult, Result):
raise TypeError("unexpected MicroResult value %r" % (microResult,))
self.microResults[name] = microResult
# Test classes.
class TestSuite:
"""TestSuite - Information on a group of tests.
A test suite groups together a set of logically related tests.
"""
def __init__(self, name, source_root, exec_root, config):
self.name = name
self.source_root = source_root
self.exec_root = exec_root
# The test suite configuration.
self.config = config
def getSourcePath(self, components):
return os.path.join(self.source_root, *components)
def getExecPath(self, components):
return os.path.join(self.exec_root, *components)
class Test:
"""Test - Information on a single test instance."""
def __init__(self, suite, path_in_suite, config, file_path = None):
self.suite = suite
self.path_in_suite = path_in_suite
self.config = config
self.file_path = file_path
# A list of conditions under which this test is expected to fail.
# Each condition is a boolean expression of features and target
# triple parts. These can optionally be provided by test format
# handlers, and will be honored when the test result is supplied.
self.xfails = []
# A list of conditions that must be satisfied before running the test.
# Each condition is a boolean expression of features. All of them
# must be True for the test to run.
# FIXME should target triple parts count here too?
self.requires = []
# A list of conditions that prevent execution of the test.
# Each condition is a boolean expression of features and target
# triple parts. All of them must be False for the test to run.
self.unsupported = []
# An optional number of retries allowed before the test finally succeeds.
# The test is run at most once plus the number of retries specified here.
self.allowed_retries = getattr(config, 'test_retry_attempts', 0)
# The test result, once complete.
self.result = None
def setResult(self, result):
assert self.result is None, "result already set"
assert isinstance(result, Result), "unexpected result type"
try:
expected_to_fail = self.isExpectedToFail()
except ValueError as err:
# Syntax error in an XFAIL line.
result.code = UNRESOLVED
result.output = str(err)
else:
if expected_to_fail:
# pass -> unexpected pass
if result.code is PASS:
result.code = XPASS
# fail -> expected fail
elif result.code is FAIL:
result.code = XFAIL
self.result = result
def isFailure(self):
assert self.result
return self.result.code.isFailure
def getFullName(self):
return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
def getFilePath(self):
if self.file_path:
return self.file_path
return self.getSourcePath()
def getSourcePath(self):
return self.suite.getSourcePath(self.path_in_suite)
def getExecPath(self):
return self.suite.getExecPath(self.path_in_suite)
def isExpectedToFail(self):
"""
isExpectedToFail() -> bool
Check whether this test is expected to fail in the current
configuration. This check relies on the test xfails property which by
some test formats may not be computed until the test has first been
executed.
Throws ValueError if an XFAIL line has a syntax error.
"""
features = self.config.available_features
triple = getattr(self.suite.config, 'target_triple', "")
# Check if any of the xfails match an available feature or the target.
for item in self.xfails:
# If this is the wildcard, it always fails.
if item == '*':
return True
# If this is a True expression of features and target triple parts,
# it fails.
try:
if BooleanExpression.evaluate(item, features, triple):
return True
except ValueError as e:
raise ValueError('Error in XFAIL list:\n%s' % str(e))
return False
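    # Example (editor's sketch): with available_features = {'asan'} and a
    # target triple of 'x86_64-unknown-linux-gnu', an XFAIL entry such as
    # 'asan && x86_64' evaluates to True (expected failure), 'windows'
    # evaluates to False, and '*' always marks the test as expected to fail.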
def isWithinFeatureLimits(self):
"""
isWithinFeatureLimits() -> bool
A test is within the feature limits set by run_only_tests if
1. the test's requirements ARE satisfied by the available features
2. the test's requirements ARE NOT satisfied after the limiting
features are removed from the available features
Throws ValueError if a REQUIRES line has a syntax error.
"""
if not self.config.limit_to_features:
return True # No limits. Run it.
# Check the requirements as-is (#1)
if self.getMissingRequiredFeatures():
return False
# Check the requirements after removing the limiting features (#2)
featuresMinusLimits = [f for f in self.config.available_features
if not f in self.config.limit_to_features]
if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
return False
return True
def getMissingRequiredFeaturesFromList(self, features):
try:
return [item for item in self.requires
if not BooleanExpression.evaluate(item, features)]
except ValueError as e:
raise ValueError('Error in REQUIRES list:\n%s' % str(e))
def getMissingRequiredFeatures(self):
"""
getMissingRequiredFeatures() -> list of strings
        Returns a list of features from REQUIRES that are not satisfied.
Throws ValueError if a REQUIRES line has a syntax error.
"""
features = self.config.available_features
return self.getMissingRequiredFeaturesFromList(features)
def getUnsupportedFeatures(self):
"""
getUnsupportedFeatures() -> list of strings
Returns a list of features from UNSUPPORTED that are present
in the test configuration's features or target triple.
Throws ValueError if an UNSUPPORTED line has a syntax error.
"""
features = self.config.available_features
triple = getattr(self.suite.config, 'target_triple', "")
try:
return [item for item in self.unsupported
if BooleanExpression.evaluate(item, features, triple)]
except ValueError as e:
raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))
def getUsedFeatures(self):
"""
getUsedFeatures() -> list of strings
Returns a list of all features appearing in XFAIL, UNSUPPORTED and
REQUIRES annotations for this test.
"""
import lit.TestRunner
parsed = lit.TestRunner._parseKeywords(self.getSourcePath(), require_script=False)
feature_keywords = ('UNSUPPORTED:', 'REQUIRES:', 'XFAIL:')
boolean_expressions = itertools.chain.from_iterable(
parsed[k] or [] for k in feature_keywords
)
tokens = itertools.chain.from_iterable(
BooleanExpression.tokenize(expr) for expr in
boolean_expressions if expr != '*'
)
identifiers = set(filter(BooleanExpression.isIdentifier, tokens))
return identifiers
def isEarlyTest(self):
"""
isEarlyTest() -> bool
Check whether this test should be executed early in a particular run.
This can be used for test suites with long running tests to maximize
parallelism or where it is desirable to surface their failures early.
"""
return self.suite.config.is_early
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/Test.py |
import sys
def create_display(opts, tests, total_tests, workers):
if opts.quiet:
return NopDisplay()
of_total = (' of %d' % total_tests) if (tests != total_tests) else ''
header = '-- Testing: %d%s tests, %d workers --' % (tests, of_total, workers)
progress_bar = None
if opts.succinct and opts.useProgressBar:
import lit.ProgressBar
try:
tc = lit.ProgressBar.TerminalController()
progress_bar = lit.ProgressBar.ProgressBar(tc, header)
header = None
except ValueError:
progress_bar = lit.ProgressBar.SimpleProgressBar('Testing: ')
return Display(opts, tests, header, progress_bar)
class NopDisplay(object):
def print_header(self): pass
def update(self, test): pass
def clear(self, interrupted): pass
class Display(object):
def __init__(self, opts, tests, header, progress_bar):
self.opts = opts
self.tests = tests
self.header = header
self.progress_bar = progress_bar
self.completed = 0
def print_header(self):
if self.header:
print(self.header)
if self.progress_bar:
self.progress_bar.update(0.0, '')
def update(self, test):
self.completed += 1
show_result = test.isFailure() or \
self.opts.showAllOutput or \
(not self.opts.quiet and not self.opts.succinct)
if show_result:
if self.progress_bar:
self.progress_bar.clear(interrupted=False)
self.print_result(test)
if self.progress_bar:
if test.isFailure():
self.progress_bar.barColor = 'RED'
percent = float(self.completed) / self.tests
self.progress_bar.update(percent, test.getFullName())
def clear(self, interrupted):
if self.progress_bar:
self.progress_bar.clear(interrupted)
def print_result(self, test):
# Show the test result line.
test_name = test.getFullName()
print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.tests))
# Show the test failure output, if requested.
if (test.isFailure() and self.opts.showOutput) or \
self.opts.showAllOutput:
if test.isFailure():
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
out = test.result.output
# Encode/decode so that, when using Python 3.6.5 in Windows 10,
# print(out) doesn't raise UnicodeEncodeError if out contains
# special characters. However, Python 2 might try to decode
# as part of the encode call if out is already encoded, so skip
# encoding if it raises UnicodeDecodeError.
if sys.stdout.encoding:
try:
out = out.encode(encoding=sys.stdout.encoding,
errors="replace")
except UnicodeDecodeError:
pass
# Python 2 can raise UnicodeDecodeError here too in cases
# where the stdout encoding is ASCII. Ignore decode errors
# in this case.
out = out.decode(encoding=sys.stdout.encoding, errors="ignore")
print(out)
print("*" * 20)
# Report test metrics, if present.
if test.result.metrics:
print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
'*'*10))
items = sorted(test.result.metrics.items())
for metric_name, value in items:
print('%s: %s ' % (metric_name, value.format()))
print("*" * 10)
# Report micro-tests, if present
if test.result.microResults:
items = sorted(test.result.microResults.items())
for micro_test_name, micro_test in items:
print("%s MICRO-TEST: %s" %
('*'*3, micro_test_name))
if micro_test.metrics:
sorted_metrics = sorted(micro_test.metrics.items())
for metric_name, value in sorted_metrics:
print(' %s: %s ' % (metric_name, value.format()))
# Ensure the output is flushed.
sys.stdout.flush()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/display.py |
#!/usr/bin/env python
# Source: http://code.activestate.com/recipes/475116/, with
# modifications by Daniel Dunbar.
import sys, re, time
def to_bytes(str):
# Encode to UTF-8 to get binary data.
return str.encode('utf-8')
class TerminalController:
"""
A class that can be used to portably generate formatted output to
a terminal.
`TerminalController` defines a set of instance variables whose
values are initialized to the control sequence necessary to
perform a given action. These can be simply included in normal
output to the terminal:
>>> term = TerminalController()
>>> print('This is '+term.GREEN+'green'+term.NORMAL)
    Alternatively, the `render()` method can be used, which replaces
'${action}' with the string required to perform 'action':
>>> term = TerminalController()
>>> print(term.render('This is ${GREEN}green${NORMAL}'))
If the terminal doesn't support a given action, then the value of
the corresponding instance variable will be set to ''. As a
result, the above code will still work on terminals that do not
support color, except that their output will not be colored.
Also, this means that you can test whether the terminal supports a
given action by simply testing the truth value of the
corresponding instance variable:
>>> term = TerminalController()
>>> if term.CLEAR_SCREEN:
    ... print('This terminal supports clearing the screen.')
Finally, if the width and height of the terminal are known, then
they will be stored in the `COLS` and `LINES` attributes.
"""
# Cursor movement:
BOL = '' #: Move the cursor to the beginning of the line
UP = '' #: Move the cursor up one line
DOWN = '' #: Move the cursor down one line
LEFT = '' #: Move the cursor left one char
RIGHT = '' #: Move the cursor right one char
# Deletion:
CLEAR_SCREEN = '' #: Clear the screen and move to home position
CLEAR_EOL = '' #: Clear to the end of the line.
CLEAR_BOL = '' #: Clear to the beginning of the line.
CLEAR_EOS = '' #: Clear to the end of the screen
# Output modes:
BOLD = '' #: Turn on bold mode
BLINK = '' #: Turn on blink mode
DIM = '' #: Turn on half-bright mode
REVERSE = '' #: Turn on reverse-video mode
NORMAL = '' #: Turn off all modes
# Cursor display:
HIDE_CURSOR = '' #: Make the cursor invisible
SHOW_CURSOR = '' #: Make the cursor visible
# Terminal size:
COLS = None #: Width of the terminal (None for unknown)
LINES = None #: Height of the terminal (None for unknown)
# Foreground colors:
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
# Background colors:
BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
_STRING_CAPABILITIES = """
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
HIDE_CURSOR=civis SHOW_CURSOR=cnorm""".split()
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
def __init__(self, term_stream=sys.stdout):
"""
Create a `TerminalController` and initialize its attributes
with appropriate values for the current terminal.
`term_stream` is the stream that will be used for terminal
output; if this stream is not a tty, then the terminal is
assumed to be a dumb terminal (i.e., have no capabilities).
"""
# Curses isn't available on all platforms
try: import curses
except: return
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream.isatty(): return
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try: curses.setupterm()
except: return
# Look up numeric capabilities.
self.COLS = curses.tigetnum('cols')
self.LINES = curses.tigetnum('lines')
self.XN = curses.tigetflag('xenl')
# Look up string capabilities.
for capability in self._STRING_CAPABILITIES:
(attrib, cap_name) = capability.split('=')
setattr(self, attrib, self._tigetstr(cap_name) or '')
# Colors
set_fg = self._tigetstr('setf')
if set_fg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, color, self._tparm(set_fg, i))
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, color, self._tparm(set_fg_ansi, i))
set_bg = self._tigetstr('setb')
if set_bg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, 'BG_'+color, self._tparm(set_bg, i))
set_bg_ansi = self._tigetstr('setab')
if set_bg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, 'BG_'+color, self._tparm(set_bg_ansi, i))
def _tparm(self, arg, index):
import curses
return curses.tparm(to_bytes(arg), index).decode('utf-8') or ''
def _tigetstr(self, cap_name):
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
import curses
cap = curses.tigetstr(cap_name)
if cap is None:
cap = ''
else:
cap = cap.decode('utf-8')
return re.sub(r'\$<\d+>[/*]?', '', cap)
def render(self, template):
"""
Replace each $-substitution in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not).
"""
return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
def _render_sub(self, match):
s = match.group()
if s == '$$': return s
else: return getattr(self, s[2:-1])
#######################################################################
# Example use case: progress bar
#######################################################################
class SimpleProgressBar:
"""
A simple progress bar which doesn't need any terminal support.
This prints out a progress bar like:
'Header: 0.. 10.. 20.. ...'
"""
def __init__(self, header):
self.header = header
self.atIndex = None
def update(self, percent, message):
if self.atIndex is None:
sys.stdout.write(self.header)
self.atIndex = 0
next = int(percent*50)
if next == self.atIndex:
return
for i in range(self.atIndex, next):
idx = i % 5
if idx == 0:
sys.stdout.write('%2d' % (i*2))
elif idx == 1:
pass # Skip second char
elif idx < 4:
sys.stdout.write('.')
else:
sys.stdout.write(' ')
sys.stdout.flush()
self.atIndex = next
def clear(self, interrupted):
if self.atIndex is not None and not interrupted:
sys.stdout.write('\n')
sys.stdout.flush()
self.atIndex = None
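# A minimal usage sketch (illustrative addition, not part of the original
# recipe): drive update() with a fraction in [0, 1] and call clear() when
# done. The helper name below is hypothetical.
def _demo_simple_progress_bar(steps=100):
    bar = SimpleProgressBar('Working: ')
    for i in range(steps + 1):
        bar.update(i / float(steps), '')
        time.sleep(0.01)
    bar.clear(interrupted=False)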
class ProgressBar:
"""
A 3-line progress bar, which looks like::
Header
20% [===========----------------------------------]
progress message
The progress bar is colored, if the terminal supports color
output; and adjusts to the width of the terminal.
"""
BAR = '%s${%s}[${BOLD}%s%s${NORMAL}${%s}]${NORMAL}%s'
HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
def __init__(self, term, header, useETA=True):
self.term = term
if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
raise ValueError("Terminal isn't capable enough -- you "
"should use a simpler progress dispaly.")
self.BOL = self.term.BOL # BoL from col#79
self.XNL = "\n" # Newline from col#79
if self.term.COLS:
self.width = self.term.COLS
if not self.term.XN:
self.BOL = self.term.UP + self.term.BOL
self.XNL = "" # Cursor must be fed to the next line
else:
self.width = 75
self.barColor = 'GREEN'
self.header = self.term.render(self.HEADER % header.center(self.width))
self.cleared = 1 #: true if we haven't drawn the bar yet.
self.useETA = useETA
if self.useETA:
self.startTime = time.time()
# self.update(0, '')
def update(self, percent, message):
if self.cleared:
sys.stdout.write(self.header)
self.cleared = 0
prefix = '%3d%% ' % (percent*100,)
suffix = ''
if self.useETA:
elapsed = time.time() - self.startTime
if percent > .0001 and elapsed > 1:
total = elapsed / percent
eta = int(total - elapsed)
h = eta // 3600
m = (eta // 60) % 60
s = eta % 60
suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
barWidth = self.width - len(prefix) - len(suffix) - 2
n = int(barWidth*percent)
if len(message) < self.width:
message = message + ' '*(self.width - len(message))
else:
message = '... ' + message[-(self.width-4):]
bc = self.barColor
bar = self.BAR % (prefix, bc, '='*n, '-'*(barWidth-n), bc, suffix)
bar = self.term.render(bar)
sys.stdout.write(
self.BOL + self.term.UP + self.term.CLEAR_EOL +
bar +
self.XNL +
self.term.CLEAR_EOL + message)
if not self.term.XN:
sys.stdout.flush()
def clear(self, interrupted):
if not self.cleared:
sys.stdout.write(self.BOL + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL)
if interrupted: # ^C creates extra line. Gobble it up!
sys.stdout.write(self.term.UP + self.term.CLEAR_EOL)
sys.stdout.write('^C')
sys.stdout.flush()
self.cleared = 1
def test():
tc = TerminalController()
p = ProgressBar(tc, 'Tests')
for i in range(101):
p.update(i/100., str(i))
time.sleep(.3)
if __name__=='__main__':
test()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/ProgressBar.py |
import os
import sys
class TestingConfig(object):
""""
TestingConfig - Information on the tests inside a suite.
"""
@staticmethod
def fromdefaults(litConfig):
"""
fromdefaults(litConfig) -> TestingConfig
Create a TestingConfig object with default values.
"""
# Set the environment based on the command line arguments.
environment = {
'PATH' : os.pathsep.join(litConfig.path +
[os.environ.get('PATH','')]),
'LLVM_DISABLE_CRASH_REPORT' : '1',
}
pass_vars = ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'SYSTEMROOT', 'TERM',
'CLANG', 'LD_PRELOAD', 'ASAN_OPTIONS', 'UBSAN_OPTIONS',
'LSAN_OPTIONS', 'ADB', 'ANDROID_SERIAL', 'SSH_AUTH_SOCK',
'SANITIZER_IGNORE_CVE_2016_2143', 'TMPDIR', 'TMP', 'TEMP',
'TEMPDIR', 'AVRLIT_BOARD', 'AVRLIT_PORT',
'FILECHECK_OPTS', 'VCINSTALLDIR', 'VCToolsinstallDir',
'VSINSTALLDIR', 'WindowsSdkDir', 'WindowsSDKLibVersion']
if sys.platform == 'win32':
pass_vars.append('INCLUDE')
pass_vars.append('LIB')
pass_vars.append('PATHEXT')
pass_vars.append('USERPROFILE')
# Python honors PYTHONUNBUFFERED (not "PYTHONBUFFERED") for unbuffered output.
environment['PYTHONUNBUFFERED'] = '1'
for var in pass_vars:
val = os.environ.get(var, '')
# Check for an empty string, as some variables such as LD_PRELOAD cannot
# be empty ('') on OSes such as OpenBSD.
if val:
environment[var] = val
# Set the default available features based on the LitConfig.
available_features = []
if litConfig.useValgrind:
available_features.append('valgrind')
if litConfig.valgrindLeakCheck:
available_features.append('vg_leak')
return TestingConfig(None,
name = '<unnamed>',
suffixes = set(),
test_format = None,
environment = environment,
substitutions = [],
unsupported = False,
test_exec_root = None,
test_source_root = None,
excludes = [],
available_features = available_features,
pipefail = True)
def load_from_path(self, path, litConfig):
"""
load_from_path(path, litConfig)
Load the configuration module at the provided path into the given config
object.
"""
# Load the config script data.
data = None
f = open(path)
try:
data = f.read()
except:
litConfig.fatal('unable to load config file: %r' % (path,))
f.close()
# Execute the config script to initialize the object.
cfg_globals = dict(globals())
cfg_globals['config'] = self
cfg_globals['lit_config'] = litConfig
cfg_globals['__file__'] = path
try:
exec(compile(data, path, 'exec'), cfg_globals, None)
if litConfig.debug:
litConfig.note('... loaded config %r' % path)
except SystemExit:
e = sys.exc_info()[1]
# We allow normal system exit inside a config file to just
# return control without error.
if e.args:
raise
except:
import traceback
litConfig.fatal(
'unable to parse config file %r, traceback: %s' % (
path, traceback.format_exc()))
self.finish(litConfig)
def __init__(self, parent, name, suffixes, test_format,
environment, substitutions, unsupported,
test_exec_root, test_source_root, excludes,
available_features, pipefail, limit_to_features = [],
is_early = False, parallelism_group = None):
self.parent = parent
self.name = str(name)
self.suffixes = set(suffixes)
self.test_format = test_format
self.environment = dict(environment)
self.substitutions = list(substitutions)
self.unsupported = unsupported
self.test_exec_root = test_exec_root
self.test_source_root = test_source_root
self.excludes = set(excludes)
self.available_features = set(available_features)
self.pipefail = pipefail
# This list is used by TestRunner.py to restrict running only tests that
# require one of the features in this list if this list is non-empty.
# Configurations can set this list to restrict the set of tests to run.
self.limit_to_features = set(limit_to_features)
# Whether the suite should be tested early in a given run.
self.is_early = bool(is_early)
self.parallelism_group = parallelism_group
self._recursiveExpansionLimit = None
@property
def recursiveExpansionLimit(self):
return self._recursiveExpansionLimit
@recursiveExpansionLimit.setter
def recursiveExpansionLimit(self, value):
if value is not None and not isinstance(value, int):
raise ValueError('recursiveExpansionLimit must be either None or an integer (got <{}>)'.format(value))
if isinstance(value, int) and value < 0:
raise ValueError('recursiveExpansionLimit must be a non-negative integer (got <{}>)'.format(value))
self._recursiveExpansionLimit = value
def finish(self, litConfig):
"""finish() - Finish this config object, after loading is complete."""
self.name = str(self.name)
self.suffixes = set(self.suffixes)
self.environment = dict(self.environment)
self.substitutions = list(self.substitutions)
if self.test_exec_root is not None:
# FIXME: This should really only be set in test suite config
# files. Should we distinguish them?
self.test_exec_root = str(self.test_exec_root)
if self.test_source_root is not None:
# FIXME: This should really only be set in test suite config
# files. Should we distinguish them?
self.test_source_root = str(self.test_source_root)
self.excludes = set(self.excludes)
@property
def root(self):
"""root attribute - The root configuration for the test suite."""
if self.parent is None:
return self
else:
return self.parent.root
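# Illustrative note (not part of the original file): config files are executed
# with this object bound to `config` (see load_from_path above), so a lit.cfg
# can bound recursive substitution expansion with, e.g.:
#
#   config.recursiveExpansionLimit = 10
#
# The property setter above rejects anything that is not None or a
# non-negative integer.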
class SubstituteCaptures:
"""
Helper class to indicate that the substitutions contains backreferences.
This can be used as follows in lit.cfg to mark substitutions as having
back-references::
    config.substitutions.append(('\b[^ ]*.cpp', SubstituteCaptures('\0.txt')))
"""
def __init__(self, substitution):
self.substitution = substitution
def replace(self, pattern, replacement):
return self.substitution
def __str__(self):
return self.substitution
def __len__(self):
return len(self.substitution)
def __getitem__(self, item):
return self.substitution.__getitem__(item)
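# Illustrative sketch (not part of the original file): because replace()
# returns the stored string untouched, backreferences survive until re.sub()
# applies them. A raw-string pair avoids the escaping pitfalls of the
# docstring example above:
#
#   config.substitutions.append(
#       (r'\b([^ ]*)\.cpp', SubstituteCaptures(r'\1.txt')))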
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/TestingConfig.py |
import itertools
import json
from xml.sax.saxutils import quoteattr as quo
import lit.Test
def by_suite_and_test_path(test):
# Suite names are not necessarily unique. Include object identity in sort
# key to avoid mixing tests of different suites.
return (test.suite.name, id(test.suite), test.path_in_suite)
class JsonReport(object):
def __init__(self, output_file):
self.output_file = output_file
def write_results(self, tests, elapsed):
unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
tests = [t for t in tests if t.result.code not in unexecuted_codes]
# Construct the data we will write.
data = {}
# Encode the current lit version as a schema version.
data['__version__'] = lit.__versioninfo__
data['elapsed'] = elapsed
# FIXME: Record some information on the lit configuration used?
# FIXME: Record information from the individual test suites?
# Encode the tests.
data['tests'] = tests_data = []
for test in tests:
test_data = {
'name': test.getFullName(),
'code': test.result.code.name,
'output': test.result.output,
'elapsed': test.result.elapsed}
# Add test metrics, if present.
if test.result.metrics:
test_data['metrics'] = metrics_data = {}
for key, value in test.result.metrics.items():
metrics_data[key] = value.todata()
# Report micro-tests separately, if present
if test.result.microResults:
for key, micro_test in test.result.microResults.items():
# Expand parent test name with micro test name
parent_name = test.getFullName()
micro_full_name = parent_name + ':' + key
micro_test_data = {
'name': micro_full_name,
'code': micro_test.code.name,
'output': micro_test.output,
'elapsed': micro_test.elapsed}
if micro_test.metrics:
micro_test_data['metrics'] = micro_metrics_data = {}
for key, value in micro_test.metrics.items():
micro_metrics_data[key] = value.todata()
tests_data.append(micro_test_data)
tests_data.append(test_data)
with open(self.output_file, 'w') as file:
json.dump(data, file, indent=2, sort_keys=True)
file.write('\n')
_invalid_xml_chars_dict = {c: None for c in range(32) if chr(c) not in ('\t', '\n', '\r')}
def remove_invalid_xml_chars(s):
# According to the XML 1.0 spec, control characters other than
# \t,\r, and \n are not permitted anywhere in the document
# (https://www.w3.org/TR/xml/#charsets) and therefore this function
# removes them to produce a valid XML document.
#
# Note: In XML 1.1 only \0 is illegal (https://www.w3.org/TR/xml11/#charsets)
# but lit currently produces XML 1.0 output.
return s.translate(_invalid_xml_chars_dict)
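# For example (illustrative): ESC (\x1b) is dropped while tab and newline
# survive, so remove_invalid_xml_chars('a\x1bb\tc') == 'ab\tc'.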
class XunitReport(object):
def __init__(self, output_file):
self.output_file = output_file
self.skipped_codes = {lit.Test.EXCLUDED,
lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
def write_results(self, tests, elapsed):
tests.sort(key=by_suite_and_test_path)
tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
with open(self.output_file, 'w') as file:
file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
file.write('<testsuites time="{time:.2f}">\n'.format(time=elapsed))
for suite, test_iter in tests_by_suite:
self._write_testsuite(file, suite, list(test_iter))
file.write('</testsuites>\n')
def _write_testsuite(self, file, suite, tests):
skipped = sum(1 for t in tests if t.result.code in self.skipped_codes)
failures = sum(1 for t in tests if t.isFailure())
name = suite.config.name.replace('.', '-')
file.write(f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n')
for test in tests:
self._write_test(file, test, name)
file.write('</testsuite>\n')
def _write_test(self, file, test, suite_name):
path = '/'.join(test.path_in_suite[:-1]).replace('.', '_')
class_name = f'{suite_name}.{path or suite_name}'
name = test.path_in_suite[-1]
time = test.result.elapsed or 0.0
file.write(f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"')
if test.isFailure():
file.write('>\n <failure><![CDATA[')
# In the unlikely case that the output contains the CDATA
# terminator we wrap it by creating a new CDATA block.
output = test.result.output.replace(']]>', ']]]]><![CDATA[>')
if isinstance(output, bytes):
output = output.decode("utf-8", 'ignore')
# Failing test output sometimes contains control characters like
# \x1b (e.g. if there was some -fcolor-diagnostics output) which are
# not allowed inside XML files.
# This causes problems with CI systems: for example, the Jenkins
# JUnit XML will throw an exception when encountering those
# characters and similar problems also occur with GitLab CI.
output = remove_invalid_xml_chars(output)
file.write(output)
file.write(']]></failure>\n</testcase>\n')
elif test.result.code in self.skipped_codes:
reason = self._get_skip_reason(test)
file.write(f'>\n <skipped message={quo(reason)}/>\n</testcase>\n')
else:
file.write('/>\n')
def _get_skip_reason(self, test):
code = test.result.code
if code == lit.Test.EXCLUDED:
return 'Test not selected (--filter, --max-tests)'
if code == lit.Test.SKIPPED:
return 'User interrupt'
assert code == lit.Test.UNSUPPORTED
features = test.getMissingRequiredFeatures()
if features:
return 'Missing required feature(s): ' + ', '.join(features)
return 'Unsupported configuration'
class TimeTraceReport(object):
def __init__(self, output_file):
self.output_file = output_file
self.skipped_codes = {lit.Test.EXCLUDED,
lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
def write_results(self, tests, elapsed):
# Find when first test started so we can make start times relative.
first_start_time = min([t.result.start for t in tests])
events = [self._get_test_event(
x, first_start_time) for x in tests if x.result.code not in self.skipped_codes]
json_data = {'traceEvents': events}
with open(self.output_file, "w") as time_trace_file:
json.dump(json_data, time_trace_file, indent=2, sort_keys=True)
def _get_test_event(self, test, first_start_time):
test_name = test.getFullName()
elapsed_time = test.result.elapsed or 0.0
start_time = test.result.start - first_start_time if test.result.start else 0.0
pid = test.result.pid or 0
return {
'pid': pid,
'tid': 1,
'ph': 'X',
'ts': int(start_time * 1000000.),
'dur': int(elapsed_time * 1000000.),
'name': test_name,
}
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/reports.py |
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
import itertools
import os
import platform
import sys
import time
import lit.cl_arguments
import lit.discovery
import lit.display
import lit.LitConfig
import lit.reports
import lit.run
import lit.Test
import lit.util
def main(builtin_params={}):
opts = lit.cl_arguments.parse_args()
params = create_params(builtin_params, opts.user_params)
is_windows = platform.system() == 'Windows'
lit_config = lit.LitConfig.LitConfig(
progname=os.path.basename(sys.argv[0]),
path=opts.path,
quiet=opts.quiet,
useValgrind=opts.useValgrind,
valgrindLeakCheck=opts.valgrindLeakCheck,
valgrindArgs=opts.valgrindArgs,
noExecute=opts.noExecute,
debug=opts.debug,
isWindows=is_windows,
params=params,
config_prefix=opts.configPrefix,
echo_all_commands=opts.echoAllCommands)
discovered_tests = lit.discovery.find_tests_for_inputs(lit_config, opts.test_paths,
opts.indirectlyRunCheck)
if not discovered_tests:
sys.stderr.write('error: did not discover any tests for provided path(s)\n')
sys.exit(2)
if opts.show_suites or opts.show_tests:
print_discovered(discovered_tests, opts.show_suites, opts.show_tests)
sys.exit(0)
if opts.show_used_features:
features = set(itertools.chain.from_iterable(t.getUsedFeatures() for t in discovered_tests))
print(' '.join(sorted(features)))
sys.exit(0)
# Command line overrides configuration for maxIndividualTestTime.
if opts.maxIndividualTestTime is not None: # `not None` is important (default: 0)
if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:
lit_config.note(('The test suite configuration requested an individual'
' test timeout of {0} seconds but a timeout of {1} seconds was'
' requested on the command line. Forcing timeout to be {1}'
' seconds')
.format(lit_config.maxIndividualTestTime,
opts.maxIndividualTestTime))
lit_config.maxIndividualTestTime = opts.maxIndividualTestTime
determine_order(discovered_tests, opts.order)
selected_tests = [t for t in discovered_tests if
opts.filter.search(t.getFullName())]
if not selected_tests:
sys.stderr.write('error: filter did not match any tests '
'(of %d discovered). ' % len(discovered_tests))
if opts.allow_empty_runs:
sys.stderr.write("Suppressing error because '--allow-empty-runs' "
'was specified.\n')
sys.exit(0)
else:
sys.stderr.write("Use '--allow-empty-runs' to suppress this "
'error.\n')
sys.exit(2)
# When running multiple shards, don't include skipped tests in the xunit
# output since merging the files will result in duplicates.
tests_for_report = discovered_tests
if opts.shard:
(run, shards) = opts.shard
selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
tests_for_report = selected_tests
if not selected_tests:
sys.stderr.write('warning: shard does not contain any tests. '
'Consider decreasing the number of shards.\n')
sys.exit(0)
selected_tests = selected_tests[:opts.max_tests]
mark_excluded(discovered_tests, selected_tests)
start = time.time()
run_tests(selected_tests, lit_config, opts, len(discovered_tests))
elapsed = time.time() - start
if opts.time_tests:
print_histogram(discovered_tests)
print_results(discovered_tests, elapsed, opts)
for report in opts.reports:
report.write_results(tests_for_report, elapsed)
if lit_config.numErrors:
sys.stderr.write('\n%d error(s) in tests\n' % lit_config.numErrors)
sys.exit(2)
if lit_config.numWarnings:
sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)
has_failure = any(t.isFailure() for t in discovered_tests)
if has_failure:
sys.exit(1)
def create_params(builtin_params, user_params):
def parse(p):
return p.split('=', 1) if '=' in p else (p, '')
params = dict(builtin_params)
params.update([parse(p) for p in user_params])
return params
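# For example (illustrative): user parameters without '=' default to '', so
# create_params({}, ['FOO=1', 'BAR']) == {'FOO': '1', 'BAR': ''}.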
def print_discovered(tests, show_suites, show_tests):
tests.sort(key=lit.reports.by_suite_and_test_path)
if show_suites:
tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
print('-- Test Suites --')
for suite, test_iter in tests_by_suite:
test_count = sum(1 for _ in test_iter)
print(' %s - %d tests' % (suite.name, test_count))
print(' Source Root: %s' % suite.source_root)
print(' Exec Root : %s' % suite.exec_root)
features = ' '.join(sorted(suite.config.available_features))
print(' Available Features: %s' % features)
substitutions = sorted(suite.config.substitutions)
substitutions = ('%s => %s' % (x, y) for (x, y) in substitutions)
substitutions = '\n'.ljust(30).join(substitutions)
print(' Available Substitutions: %s' % substitutions)
if show_tests:
print('-- Available Tests --')
for t in tests:
print(' %s' % t.getFullName())
def determine_order(tests, order):
assert order in ['default', 'random', 'failing-first']
if order == 'default':
tests.sort(key=lambda t: (not t.isEarlyTest(), t.getFullName()))
elif order == 'random':
import random
random.shuffle(tests)
else:
def by_mtime(test):
return os.path.getmtime(test.getFilePath())
tests.sort(key=by_mtime, reverse=True)
def touch_file(test):
if test.isFailure():
os.utime(test.getFilePath(), None)
def filter_by_shard(tests, run, shards, lit_config):
test_ixs = range(run - 1, len(tests), shards)
selected_tests = [tests[i] for i in test_ixs]
# For clarity, generate a preview of the first few test indices in the shard
# to accompany the arithmetic expression.
preview_len = 3
preview = ', '.join([str(i + 1) for i in test_ixs[:preview_len]])
if len(test_ixs) > preview_len:
preview += ', ...'
msg = f'Selecting shard {run}/{shards} = ' \
f'size {len(selected_tests)}/{len(tests)} = ' \
f'tests #({shards}*k)+{run} = [{preview}]'
lit_config.note(msg)
return selected_tests
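# For example (illustrative): with run=2 and shards=3, a ten-test list selects
# zero-based indices 1, 4, 7 (i.e. tests #2, #5, #8), matching the
# "#(shards*k)+run" arithmetic in the note above.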
def mark_excluded(discovered_tests, selected_tests):
excluded_tests = set(discovered_tests) - set(selected_tests)
result = lit.Test.Result(lit.Test.EXCLUDED)
for t in excluded_tests:
t.setResult(result)
def run_tests(tests, lit_config, opts, discovered_tests):
workers = min(len(tests), opts.workers)
display = lit.display.create_display(opts, len(tests), discovered_tests,
workers)
def progress_callback(test):
display.update(test)
if opts.order == 'failing-first':
touch_file(test)
run = lit.run.Run(tests, lit_config, workers, progress_callback,
opts.max_failures, opts.timeout)
display.print_header()
interrupted = False
error = None
try:
execute_in_tmp_dir(run, lit_config)
except KeyboardInterrupt:
interrupted = True
error = ' interrupted by user'
except lit.run.MaxFailuresError:
error = 'warning: reached maximum number of test failures'
except lit.run.TimeoutError:
error = 'warning: reached timeout'
display.clear(interrupted)
if error:
sys.stderr.write('%s, skipping remaining tests\n' % error)
def execute_in_tmp_dir(run, lit_config):
# Create a temp directory inside the normal temp directory so that we can
# try to avoid temporary test file leaks. The user can avoid this behavior
# by setting LIT_PRESERVES_TMP in the environment, so they can easily use
# their own temp directory to monitor temporary file leaks or handle them at
# the buildbot level.
tmp_dir = None
if 'LIT_PRESERVES_TMP' not in os.environ:
import tempfile
tmp_dir = tempfile.mkdtemp(prefix="lit_tmp_")
os.environ.update({
'TMPDIR': tmp_dir,
'TMP': tmp_dir,
'TEMP': tmp_dir,
'TEMPDIR': tmp_dir,
})
try:
run.execute()
finally:
if tmp_dir:
try:
import shutil
shutil.rmtree(tmp_dir)
except Exception as e:
lit_config.warning("Failed to delete temp directory '%s', try upgrading your version of Python to fix this" % tmp_dir)
def print_histogram(tests):
test_times = [(t.getFullName(), t.result.elapsed)
for t in tests if t.result.elapsed]
if test_times:
lit.util.printHistogram(test_times, title='Tests')
def print_results(tests, elapsed, opts):
tests_by_code = {code: [] for code in lit.Test.ResultCode.all_codes()}
for test in tests:
tests_by_code[test.result.code].append(test)
for code in lit.Test.ResultCode.all_codes():
print_group(tests_by_code[code], code, opts.shown_codes)
print_summary(tests_by_code, opts.quiet, elapsed)
def print_group(tests, code, shown_codes):
if not tests:
return
if not code.isFailure and code not in shown_codes:
return
print('*' * 20)
print('{} Tests ({}):'.format(code.label, len(tests)))
for test in tests:
print(' %s' % test.getFullName())
sys.stdout.write('\n')
def print_summary(tests_by_code, quiet, elapsed):
if not quiet:
print('\nTesting Time: %.2fs' % elapsed)
codes = [c for c in lit.Test.ResultCode.all_codes()
if not quiet or c.isFailure]
groups = [(c.label, len(tests_by_code[c])) for c in codes]
groups = [(label, count) for label, count in groups if count]
if not groups:
return
max_label_len = max(len(label) for label, _ in groups)
max_count_len = max(len(str(count)) for _, count in groups)
for (label, count) in groups:
label = label.ljust(max_label_len)
count = str(count).rjust(max_count_len)
print(' %s: %s' % (label, count))
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/main.py |
import re
class BooleanExpression:
# A simple evaluator of boolean expressions.
#
# Grammar:
# expr :: or_expr
# or_expr :: and_expr ('||' and_expr)*
# and_expr :: not_expr ('&&' not_expr)*
# not_expr :: '!' not_expr
# '(' or_expr ')'
# identifier
# identifier :: [-+=._a-zA-Z0-9]+
# Evaluates `string` as a boolean expression.
# Returns True or False. Throws a ValueError on syntax error.
#
# Variables in `variables` are true.
# Substrings of `triple` are true.
# 'true' is true.
# All other identifiers are false.
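# For example (illustrative):
#   BooleanExpression.evaluate('x86 && !asan', {'x86'})             -> True
#   BooleanExpression.evaluate('linux', {}, 'x86_64-unknown-linux') -> True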
@staticmethod
def evaluate(string, variables, triple=""):
try:
parser = BooleanExpression(string, set(variables), triple)
return parser.parseAll()
except ValueError as e:
raise ValueError(str(e) + ('\nin expression: %r' % string))
#####
def __init__(self, string, variables, triple=""):
self.tokens = BooleanExpression.tokenize(string)
self.variables = variables
self.variables.add('true')
self.triple = triple
self.value = None
self.token = None
# Singleton end-of-expression marker.
END = object()
# Tokenization pattern.
Pattern = re.compile(r'\A\s*([()]|[-+=._a-zA-Z0-9]+|&&|\|\||!)\s*(.*)\Z')
@staticmethod
def tokenize(string):
while True:
m = re.match(BooleanExpression.Pattern, string)
if m is None:
if string == "":
yield BooleanExpression.END
return
else:
raise ValueError("couldn't parse text: %r" % string)
token = m.group(1)
string = m.group(2)
yield token
def quote(self, token):
if token is BooleanExpression.END:
return '<end of expression>'
else:
return repr(token)
def accept(self, t):
if self.token == t:
self.token = next(self.tokens)
return True
else:
return False
def expect(self, t):
if self.token == t:
if self.token != BooleanExpression.END:
self.token = next(self.tokens)
else:
raise ValueError("expected: %s\nhave: %s" %
(self.quote(t), self.quote(self.token)))
@staticmethod
def isIdentifier(token):
if (token is BooleanExpression.END or token == '&&' or token == '||' or
token == '!' or token == '(' or token == ')'):
return False
return True
def parseNOT(self):
if self.accept('!'):
self.parseNOT()
self.value = not self.value
elif self.accept('('):
self.parseOR()
self.expect(')')
elif not BooleanExpression.isIdentifier(self.token):
raise ValueError("expected: '!' or '(' or identifier\nhave: %s" %
self.quote(self.token))
else:
self.value = (self.token in self.variables or
self.token in self.triple)
self.token = next(self.tokens)
def parseAND(self):
self.parseNOT()
while self.accept('&&'):
left = self.value
self.parseNOT()
right = self.value
# this is technically the wrong associativity, but it
# doesn't matter for this limited expression grammar
self.value = left and right
def parseOR(self):
self.parseAND()
while self.accept('||'):
left = self.value
self.parseAND()
right = self.value
# this is technically the wrong associativity, but it
# doesn't matter for this limited expression grammar
self.value = left or right
def parseAll(self):
self.token = next(self.tokens)
self.parseOR()
self.expect(BooleanExpression.END)
return self.value
#######
# Tests
import unittest
class TestBooleanExpression(unittest.TestCase):
def test_variables(self):
variables = {'its-true', 'false-lol-true', 'under_score',
'e=quals', 'd1g1ts'}
self.assertTrue(BooleanExpression.evaluate('true', variables))
self.assertTrue(BooleanExpression.evaluate('its-true', variables))
self.assertTrue(BooleanExpression.evaluate('false-lol-true', variables))
self.assertTrue(BooleanExpression.evaluate('under_score', variables))
self.assertTrue(BooleanExpression.evaluate('e=quals', variables))
self.assertTrue(BooleanExpression.evaluate('d1g1ts', variables))
self.assertFalse(BooleanExpression.evaluate('false', variables))
self.assertFalse(BooleanExpression.evaluate('True', variables))
self.assertFalse(BooleanExpression.evaluate('true-ish', variables))
self.assertFalse(BooleanExpression.evaluate('not_true', variables))
self.assertFalse(BooleanExpression.evaluate('tru', variables))
def test_triple(self):
triple = 'arch-vendor-os'
self.assertTrue(BooleanExpression.evaluate('arch-', {}, triple))
self.assertTrue(BooleanExpression.evaluate('ar', {}, triple))
self.assertTrue(BooleanExpression.evaluate('ch-vend', {}, triple))
self.assertTrue(BooleanExpression.evaluate('-vendor-', {}, triple))
self.assertTrue(BooleanExpression.evaluate('-os', {}, triple))
self.assertFalse(BooleanExpression.evaluate('arch-os', {}, triple))
def test_operators(self):
self.assertTrue(BooleanExpression.evaluate('true || true', {}))
self.assertTrue(BooleanExpression.evaluate('true || false', {}))
self.assertTrue(BooleanExpression.evaluate('false || true', {}))
self.assertFalse(BooleanExpression.evaluate('false || false', {}))
self.assertTrue(BooleanExpression.evaluate('true && true', {}))
self.assertFalse(BooleanExpression.evaluate('true && false', {}))
self.assertFalse(BooleanExpression.evaluate('false && true', {}))
self.assertFalse(BooleanExpression.evaluate('false && false', {}))
self.assertFalse(BooleanExpression.evaluate('!true', {}))
self.assertTrue(BooleanExpression.evaluate('!false', {}))
self.assertTrue(BooleanExpression.evaluate(' ((!((false) )) ) ', {}))
self.assertTrue(BooleanExpression.evaluate('true && (true && (true))', {}))
self.assertTrue(BooleanExpression.evaluate('!false && !false && !! !false', {}))
self.assertTrue(BooleanExpression.evaluate('false && false || true', {}))
self.assertTrue(BooleanExpression.evaluate('(false && false) || true', {}))
self.assertFalse(BooleanExpression.evaluate('false && (false || true)', {}))
# Evaluate boolean expression `expr`.
# Fail if it does not throw a ValueError containing the text `error`.
def checkException(self, expr, error):
try:
BooleanExpression.evaluate(expr, {})
self.fail("expression %r didn't cause an exception" % expr)
except ValueError as e:
if -1 == str(e).find(error):
self.fail(("expression %r caused the wrong ValueError\n" +
"actual error was:\n%s\n" +
"expected error was:\n%s\n") % (expr, e, error))
except BaseException as e:
self.fail(("expression %r caused the wrong exception; actual " +
"exception was: \n%r") % (expr, e))
def test_errors(self):
self.checkException("ba#d",
"couldn't parse text: '#d'\n" +
"in expression: 'ba#d'")
self.checkException("true and true",
"expected: <end of expression>\n" +
"have: 'and'\n" +
"in expression: 'true and true'")
self.checkException("|| true",
"expected: '!' or '(' or identifier\n" +
"have: '||'\n" +
"in expression: '|| true'")
self.checkException("true &&",
"expected: '!' or '(' or identifier\n" +
"have: <end of expression>\n" +
"in expression: 'true &&'")
self.checkException("",
"expected: '!' or '(' or identifier\n" +
"have: <end of expression>\n" +
"in expression: ''")
self.checkException("*",
"couldn't parse text: '*'\n" +
"in expression: '*'")
self.checkException("no wait stop",
"expected: <end of expression>\n" +
"have: 'wait'\n" +
"in expression: 'no wait stop'")
self.checkException("no-$-please",
"couldn't parse text: '$-please'\n" +
"in expression: 'no-$-please'")
self.checkException("(((true && true) || true)",
"expected: ')'\n" +
"have: <end of expression>\n" +
"in expression: '(((true && true) || true)'")
self.checkException("true (true)",
"expected: <end of expression>\n" +
"have: '('\n" +
"in expression: 'true (true)'")
self.checkException("( )",
"expected: '!' or '(' or identifier\n" +
"have: ')'\n" +
"in expression: '( )'")
if __name__ == '__main__':
unittest.main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/BooleanExpression.py |
class Command:
def __init__(self, args, redirects):
self.args = list(args)
self.redirects = list(redirects)
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
def __eq__(self, other):
if not isinstance(other, Command):
return False
return ((self.args, self.redirects) ==
(other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
if "'" not in arg:
quoted = "'%s'" % arg
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
raise NotImplementedError('Unable to quote %r' % arg)
file.write(quoted)
# For debugging / validation. Import through the package so this works
# under Python 3 as well.
import lit.ShUtil as ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
file.write("%s '%s'" % (r[0][0], r[1]))
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class GlobItem:
def __init__(self, pattern):
self.pattern = pattern
def __repr__(self):
return self.pattern
def __eq__(self, other):
    # Compare against GlobItem, not Command (fixes a copy/paste bug).
    if not isinstance(other, GlobItem):
        return False
    return (self.pattern == other.pattern)
def resolve(self, cwd):
import glob
import os
if os.path.isabs(self.pattern):
abspath = self.pattern
else:
abspath = os.path.join(cwd, self.pattern)
results = glob.glob(abspath)
return [self.pattern] if len(results) == 0 else results
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
self.negate = negate
self.pipe_err = pipe_err
def __repr__(self):
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
def __eq__(self, other):
    if not isinstance(other, Pipeline):
        return False
    # Compare other.pipe_err, not self.pipe_err (fixes a copy/paste bug).
    return ((self.commands, self.negate, self.pipe_err) ==
            (other.commands, other.negate, other.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
assert op in (';', '&', '||', '&&')
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
def __eq__(self, other):
if not isinstance(other, Seq):
return False
return ((self.lhs, self.op, self.rhs) ==
(other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
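# Illustrative sketch (not part of the original file): the parsed form of the
# shell fragment `a | b && c` would be
#
#   Seq(Pipeline([Command(['a'], []), Command(['b'], [])]), '&&',
#       Pipeline([Command(['c'], [])]))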
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/ShCommands.py |
"""
TestCase adaptor for providing a Python 'unittest' compatible interface to
'lit' tests.
"""
import unittest
import lit.discovery
import lit.LitConfig
import lit.worker
class UnresolvedError(RuntimeError):
pass
class LitTestCase(unittest.TestCase):
def __init__(self, test, lit_config):
unittest.TestCase.__init__(self)
self._test = test
self._lit_config = lit_config
def id(self):
return self._test.getFullName()
def shortDescription(self):
return self._test.getFullName()
def runTest(self):
# Run the test.
result = lit.worker._execute(self._test, self._lit_config)
# Adapt the result to unittest.
if result.code is lit.Test.UNRESOLVED:
raise UnresolvedError(result.output)
elif result.code.isFailure:
self.fail(result.output)
def load_test_suite(inputs):
import platform
windows = platform.system() == 'Windows'
# Create the global config object.
lit_config = lit.LitConfig.LitConfig(
progname='lit',
path=[],
quiet=False,
useValgrind=False,
valgrindLeakCheck=False,
valgrindArgs=[],
noExecute=False,
debug=False,
isWindows=windows,
params={})
# Perform test discovery.
tests = lit.discovery.find_tests_for_inputs(lit_config, inputs, False)
test_adaptors = [LitTestCase(t, lit_config) for t in tests]
# Return a unittest test suite which just runs the tests in order.
return unittest.TestSuite(test_adaptors)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/LitTestCase.py |
from __future__ import absolute_import
import errno
import io
import itertools
import getopt
import os, signal, subprocess, sys
import re
import stat
import platform
import shutil
import tempfile
import threading
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from lit.ShCommands import GlobItem, Command
import lit.ShUtil as ShUtil
import lit.Test as Test
import lit.util
from lit.util import to_bytes, to_string, to_unicode
from lit.BooleanExpression import BooleanExpression
class InternalShellError(Exception):
def __init__(self, command, message):
self.command = command
self.message = message
kIsWindows = platform.system() == 'Windows'
# Don't use close_fds on Windows.
kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
kDevNull = "/dev/null"
# A regex that matches %dbg(ARG), which lit inserts at the beginning of each
# run command pipeline such that ARG specifies the pipeline's source line
# number. lit later expands each %dbg(ARG) to a command that behaves as a null
# command in the target shell so that the line number is seen in lit's verbose
# mode.
#
# This regex captures ARG. ARG must not contain a right parenthesis, which
# terminates %dbg. ARG must not contain quotes, in which ARG might be enclosed
# during expansion.
kPdbgRegex = '%dbg\\(([^)\'"]*)\\)'
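# For example (illustrative):
#   re.match(kPdbgRegex, '%dbg(RUN: at line 12)').group(1) == 'RUN: at line 12'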
class ShellEnvironment(object):
"""Mutable shell environment containing things like CWD and env vars.
Environment variables are not implemented, but cwd tracking is.
"""
def __init__(self, cwd, env):
self.cwd = cwd
self.env = dict(env)
class TimeoutHelper(object):
"""
Helper object used to manage enforcing a timeout in
_executeShCmd(). It is passed through recursive calls
to collect processes that have been executed so that when
the timeout happens they can be killed.
"""
def __init__(self, timeout):
self.timeout = timeout
self._procs = []
self._timeoutReached = False
self._doneKillPass = False
# This lock will be used to protect concurrent access
# to _procs and _doneKillPass
self._lock = None
self._timer = None
def cancel(self):
if not self.active():
return
self._timer.cancel()
def active(self):
return self.timeout > 0
def addProcess(self, proc):
if not self.active():
return
needToRunKill = False
with self._lock:
self._procs.append(proc)
# Avoid re-entering the lock by finding out if kill needs to be run
# again here but call it if necessary once we have left the lock.
# We could use a reentrant lock here instead but this code seems
# clearer to me.
needToRunKill = self._doneKillPass
# The initial call to _kill() from the timer thread already happened so
# we need to call it again from this thread, otherwise this process
# will be left to run even though the timeout was already hit
if needToRunKill:
assert self.timeoutReached()
self._kill()
def startTimer(self):
if not self.active():
return
# Do some late initialisation that's only needed
# if there is a timeout set
self._lock = threading.Lock()
self._timer = threading.Timer(self.timeout, self._handleTimeoutReached)
self._timer.start()
def _handleTimeoutReached(self):
self._timeoutReached = True
self._kill()
def timeoutReached(self):
return self._timeoutReached
def _kill(self):
"""
This method may be called multiple times as we might get unlucky
and be in the middle of creating a new process in _executeShCmd()
which won't yet be in ``self._procs``. By locking here and in
addProcess() we should be able to kill processes launched after
the initial call to _kill()
"""
with self._lock:
for p in self._procs:
lit.util.killProcessAndChildren(p.pid)
# Empty the list and note that we've done a pass over the list
self._procs = [] # Python2 doesn't have list.clear()
self._doneKillPass = True
class ShellCommandResult(object):
"""Captures the result of an individual command."""
def __init__(self, command, stdout, stderr, exitCode, timeoutReached,
outputFiles = []):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.exitCode = exitCode
self.timeoutReached = timeoutReached
self.outputFiles = list(outputFiles)
def executeShCmd(cmd, shenv, results, timeout=0):
"""
Wrapper around _executeShCmd that handles
timeout
"""
# Use the helper even when no timeout is required to make
# other code simpler (i.e. avoid a bunch of ``!= None`` checks)
timeoutHelper = TimeoutHelper(timeout)
if timeout > 0:
timeoutHelper.startTimer()
finalExitCode = _executeShCmd(cmd, shenv, results, timeoutHelper)
timeoutHelper.cancel()
timeoutInfo = None
if timeoutHelper.timeoutReached():
timeoutInfo = 'Reached timeout of {} seconds'.format(timeout)
return (finalExitCode, timeoutInfo)
def expand_glob(arg, cwd):
if isinstance(arg, GlobItem):
return sorted(arg.resolve(cwd))
return [arg]
def expand_glob_expressions(args, cwd):
result = [args[0]]
for arg in args[1:]:
result.extend(expand_glob(arg, cwd))
return result
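# For example (illustrative): args[0] (the command name) is never expanded, so
# expand_glob_expressions(['rm', GlobItem('*.o')], '/tmp') keeps 'rm' intact
# and replaces the glob with the sorted matches under /tmp (or the literal
# pattern when nothing matches).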
def quote_windows_command(seq):
"""
Reimplement Python's private subprocess.list2cmdline for MSys compatibility
Based on CPython implementation here:
https://hg.python.org/cpython/file/849826a900d2/Lib/subprocess.py#l422
Some core util distributions (MSys) don't tokenize command line arguments
the same way that MSVC CRT does. Lit rolls its own quoting logic similar to
the stock CPython logic to paper over these quoting and tokenization rule
differences.
We use the same algorithm from MSDN as CPython
(http://msdn.microsoft.com/en-us/library/17w5ykft.aspx), but we treat more
characters as needing quoting, such as double quotes themselves.
"""
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
# This logic differs from upstream list2cmdline.
needquote = (" " in arg) or ("\t" in arg) or ("\"" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
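# For example (illustrative): quote_windows_command(['a b', 'c"d']) yields
# "a b" "c\"d" -- whitespace-bearing arguments are wrapped in double quotes
# and embedded quotes are backslash-escaped.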
# args are from 'export' or 'env' command.
# Skips the command, and parses its arguments.
# Modifies env accordingly.
# Returns a copy of args without the command or its arguments.
def updateEnv(env, args):
arg_idx_next = len(args)
unset_next_env_var = False
for arg_idx, arg in enumerate(args[1:]):
# Support for the -u flag (unsetting) for env command
# e.g., env -u FOO -u BAR will remove both FOO and BAR
# from the environment.
if arg == '-u':
unset_next_env_var = True
continue
if unset_next_env_var:
unset_next_env_var = False
if arg in env.env:
del env.env[arg]
continue
# Partition the string into KEY=VALUE.
key, eq, val = arg.partition('=')
# Stop if there was no equals.
if eq == '':
arg_idx_next = arg_idx + 1
break
env.env[key] = val
return args[arg_idx_next:]
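# For example (illustrative):
#   updateEnv(env, ['env', '-u', 'FOO', 'BAR=1', 'cmd', 'arg'])
# removes FOO from env.env, sets BAR to '1', and returns ['cmd', 'arg'].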
def executeBuiltinCd(cmd, shenv):
"""executeBuiltinCd - Change the current directory."""
if len(cmd.args) != 2:
raise InternalShellError("'cd' supports only one argument")
newdir = cmd.args[1]
# Update the cwd in the parent environment.
if os.path.isabs(newdir):
shenv.cwd = newdir
else:
shenv.cwd = os.path.realpath(os.path.join(shenv.cwd, newdir))
# The cd builtin always succeeds. If the directory does not exist, the
# following Popen calls will fail instead.
return ShellCommandResult(cmd, "", "", 0, False)
def executeBuiltinExport(cmd, shenv):
"""executeBuiltinExport - Set an environment variable."""
if len(cmd.args) != 2:
raise InternalShellError("'export' supports only one argument")
updateEnv(shenv, cmd.args)
return ShellCommandResult(cmd, "", "", 0, False)
def executeBuiltinEcho(cmd, shenv):
"""Interpret a redirected echo command"""
opened_files = []
stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv,
opened_files)
if stdin != subprocess.PIPE or stderr != subprocess.PIPE:
raise InternalShellError(
cmd, "stdin and stderr redirects not supported for echo")
# Some tests have un-redirected echo commands to help debug test failures.
# Buffer our output and return it to the caller.
is_redirected = True
encode = lambda x : x
if stdout == subprocess.PIPE:
is_redirected = False
stdout = StringIO()
elif kIsWindows:
# Reopen stdout in binary mode to avoid CRLF translation. The versions
# of echo we are replacing on Windows all emit plain LF, and the LLVM
# tests now depend on this.
# When we open as binary, however, this also means that we have to write
# 'bytes' objects to stdout instead of 'str' objects.
encode = lit.util.to_bytes
stdout = open(stdout.name, stdout.mode + 'b')
opened_files.append((None, None, stdout, None))
# Implement echo flags. We only support -e and -n, and not yet in
# combination. We have to ignore unknown flags, because `echo "-D FOO"`
# prints the dash.
args = cmd.args[1:]
interpret_escapes = False
write_newline = True
while len(args) >= 1 and args[0] in ('-e', '-n'):
flag = args[0]
args = args[1:]
if flag == '-e':
interpret_escapes = True
elif flag == '-n':
write_newline = False
def maybeUnescape(arg):
if not interpret_escapes:
return arg
arg = lit.util.to_bytes(arg)
codec = 'string_escape' if sys.version_info < (3,0) else 'unicode_escape'
return arg.decode(codec)
if args:
for arg in args[:-1]:
stdout.write(encode(maybeUnescape(arg)))
stdout.write(encode(' '))
stdout.write(encode(maybeUnescape(args[-1])))
if write_newline:
stdout.write(encode('\n'))
for (name, mode, f, path) in opened_files:
f.close()
output = "" if is_redirected else stdout.getvalue()
return ShellCommandResult(cmd, output, "", 0, False)
def executeBuiltinMkdir(cmd, cmd_shenv):
"""executeBuiltinMkdir - Create new directories."""
args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
try:
opts, args = getopt.gnu_getopt(args, 'p')
except getopt.GetoptError as err:
raise InternalShellError(cmd, "Unsupported: 'mkdir': %s" % str(err))
parent = False
for o, a in opts:
if o == "-p":
parent = True
else:
assert False, "unhandled option"
if len(args) == 0:
raise InternalShellError(cmd, "Error: 'mkdir' is missing an operand")
stderr = StringIO()
exitCode = 0
for dir in args:
cwd = cmd_shenv.cwd
dir = to_unicode(dir) if kIsWindows else to_bytes(dir)
cwd = to_unicode(cwd) if kIsWindows else to_bytes(cwd)
if not os.path.isabs(dir):
dir = os.path.realpath(os.path.join(cwd, dir))
if parent:
lit.util.mkdir_p(dir)
else:
try:
lit.util.mkdir(dir)
except OSError as err:
stderr.write("Error: 'mkdir' command failed, %s\n" % str(err))
exitCode = 1
return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
def executeBuiltinRm(cmd, cmd_shenv):
"""executeBuiltinRm - Removes (deletes) files or directories."""
args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
try:
opts, args = getopt.gnu_getopt(args, "frR", ["--recursive"])
except getopt.GetoptError as err:
raise InternalShellError(cmd, "Unsupported: 'rm': %s" % str(err))
force = False
recursive = False
for o, a in opts:
if o == "-f":
force = True
elif o in ("-r", "-R", "--recursive"):
recursive = True
else:
assert False, "unhandled option"
if len(args) == 0:
raise InternalShellError(cmd, "Error: 'rm' is missing an operand")
def on_rm_error(func, path, exc_info):
# path is the file that couldn't be removed. Assume it is read-only,
# make it writable, and retry the removal.
os.chmod(path, stat.S_IMODE( os.stat(path).st_mode) | stat.S_IWRITE)
os.remove(path)
stderr = StringIO()
exitCode = 0
for path in args:
cwd = cmd_shenv.cwd
path = to_unicode(path) if kIsWindows else to_bytes(path)
cwd = to_unicode(cwd) if kIsWindows else to_bytes(cwd)
if not os.path.isabs(path):
path = os.path.realpath(os.path.join(cwd, path))
if force and not os.path.exists(path):
continue
try:
if os.path.isdir(path):
if not recursive:
stderr.write("Error: %s is a directory\n" % path)
exitCode = 1
if platform.system() == 'Windows':
# NOTE: use ctypes to access `SHFileOperationW` on Windows to
# use the NT style path to get access to long file paths which
# cannot be removed otherwise.
from ctypes.wintypes import BOOL, HWND, LPCWSTR, UINT, WORD
from ctypes import addressof, byref, c_void_p, create_unicode_buffer
from ctypes import Structure
from ctypes import windll, WinError, POINTER
class SHFILEOPSTRUCTW(Structure):
_fields_ = [
('hWnd', HWND),
('wFunc', UINT),
('pFrom', LPCWSTR),
('pTo', LPCWSTR),
('fFlags', WORD),
('fAnyOperationsAborted', BOOL),
('hNameMappings', c_void_p),
('lpszProgressTitle', LPCWSTR),
]
FO_MOVE, FO_COPY, FO_DELETE, FO_RENAME = range(1, 5)
FOF_SILENT = 4
FOF_NOCONFIRMATION = 16
FOF_NOCONFIRMMKDIR = 512
FOF_NOERRORUI = 1024
FOF_NO_UI = FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_NOCONFIRMMKDIR
SHFileOperationW = windll.shell32.SHFileOperationW
SHFileOperationW.argtypes = [POINTER(SHFILEOPSTRUCTW)]
path = os.path.abspath(path)
pFrom = create_unicode_buffer(path, len(path) + 2)
pFrom[len(path)] = pFrom[len(path) + 1] = '\0'
operation = SHFILEOPSTRUCTW(wFunc=UINT(FO_DELETE),
pFrom=LPCWSTR(addressof(pFrom)),
fFlags=FOF_NO_UI)
result = SHFileOperationW(byref(operation))
if result:
raise WinError(result)
else:
shutil.rmtree(path, onerror = on_rm_error if force else None)
else:
if force and not os.access(path, os.W_OK):
os.chmod(path,
stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
os.remove(path)
except OSError as err:
stderr.write("Error: 'rm' command failed, %s" % str(err))
exitCode = 1
return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
def executeBuiltinColon(cmd, cmd_shenv):
"""executeBuiltinColon - Discard arguments and exit with status 0."""
return ShellCommandResult(cmd, "", "", 0, False)
def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
"""Return the standard fds for cmd after applying redirects
Returns the three standard file descriptors for the new child process. Each
fd may be an open, writable file object or a sentinel value from the
subprocess module.
"""
# Apply the redirections, we use (N,) as a sentinel to indicate stdin,
# stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
# from a file are represented with a list [file, mode, file-object]
# where file-object is initially None.
redirects = [(0,), (1,), (2,)]
for (op, filename) in cmd.redirects:
if op == ('>',2):
redirects[2] = [filename, 'w', None]
elif op == ('>>',2):
redirects[2] = [filename, 'a', None]
elif op == ('>&',2) and filename in '012':
redirects[2] = redirects[int(filename)]
elif op == ('>&',) or op == ('&>',):
redirects[1] = redirects[2] = [filename, 'w', None]
elif op == ('>',):
redirects[1] = [filename, 'w', None]
elif op == ('>>',):
redirects[1] = [filename, 'a', None]
elif op == ('<',):
redirects[0] = [filename, 'r', None]
else:
raise InternalShellError(cmd, "Unsupported redirect: %r" % ((op, filename),))
# Open file descriptors in a second pass.
std_fds = [None, None, None]
for (index, r) in enumerate(redirects):
# Handle the sentinel values for defaults up front.
if isinstance(r, tuple):
if r == (0,):
fd = stdin_source
elif r == (1,):
if index == 0:
raise InternalShellError(cmd, "Unsupported redirect for stdin")
elif index == 1:
fd = subprocess.PIPE
else:
fd = subprocess.STDOUT
elif r == (2,):
if index != 2:
raise InternalShellError(cmd, "Unsupported redirect on stdout")
fd = subprocess.PIPE
else:
raise InternalShellError(cmd, "Bad redirect")
std_fds[index] = fd
continue
(filename, mode, fd) = r
# Check if we already have an open fd. This can happen if stdout and
# stderr go to the same place.
if fd is not None:
std_fds[index] = fd
continue
redir_filename = None
name = expand_glob(filename, cmd_shenv.cwd)
if len(name) != 1:
raise InternalShellError(cmd, "Unsupported: glob in "
"redirect expanded to multiple files")
name = name[0]
if kAvoidDevNull and name == kDevNull:
fd = tempfile.TemporaryFile(mode=mode)
elif kIsWindows and name == '/dev/tty':
# Simulate /dev/tty on Windows.
# "CON" is a special filename for the console.
fd = open("CON", mode)
else:
# Make sure relative paths are relative to the cwd.
redir_filename = os.path.join(cmd_shenv.cwd, name)
redir_filename = to_unicode(redir_filename) \
if kIsWindows else to_bytes(redir_filename)
fd = open(redir_filename, mode)
# Workaround a Win32 and/or subprocess bug when appending.
#
# FIXME: Actually, this is probably an instance of PR6753.
if mode == 'a':
fd.seek(0, 2)
# Mutate the underlying redirect list so that we can redirect stdout
# and stderr to the same place without opening the file twice.
r[2] = fd
opened_files.append((filename, mode, fd) + (redir_filename,))
std_fds[index] = fd
return std_fds
def _executeShCmd(cmd, shenv, results, timeoutHelper):
if timeoutHelper.timeoutReached():
# Prevent further recursion if the timeout has been hit
# as we should try to avoid launching more processes.
return None
if isinstance(cmd, ShUtil.Seq):
if cmd.op == ';':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
return _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
if cmd.op == '&':
raise InternalShellError(cmd,"unsupported shell operator: '&'")
if cmd.op == '||':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
if res != 0:
res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
return res
if cmd.op == '&&':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
if res is None:
return res
if res == 0:
res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
return res
raise ValueError('Unknown shell command: %r' % cmd.op)
assert isinstance(cmd, ShUtil.Pipeline)
procs = []
default_stdin = subprocess.PIPE
stderrTempFiles = []
opened_files = []
named_temp_files = []
builtin_commands = set(['cat', 'diff'])
builtin_commands_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "builtin_commands")
inproc_builtins = {'cd': executeBuiltinCd,
'export': executeBuiltinExport,
'echo': executeBuiltinEcho,
'mkdir': executeBuiltinMkdir,
'rm': executeBuiltinRm,
':': executeBuiltinColon}
# To avoid deadlock, we use a single stderr stream for piped
# output. This is null until we have seen some output using
# stderr.
for i,j in enumerate(cmd.commands):
# Reference the global environment by default.
cmd_shenv = shenv
args = list(j.args)
not_args = []
not_count = 0
not_crash = False
while True:
if args[0] == 'env':
# Create a copy of the global environment and modify it for
# this one command. There might be multiple envs in a pipeline,
# and there might be multiple envs in a command (usually when
# one comes from a substitution):
# env FOO=1 llc < %s | env BAR=2 llvm-mc | FileCheck %s
# env FOO=1 %{another_env_plus_cmd} | FileCheck %s
if cmd_shenv is shenv:
cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
args = updateEnv(cmd_shenv, args)
if not args:
raise InternalShellError(j, "Error: 'env' requires a"
" subcommand")
elif args[0] == 'not':
not_args.append(args.pop(0))
not_count += 1
if args and args[0] == '--crash':
not_args.append(args.pop(0))
not_crash = True
if not args:
raise InternalShellError(j, "Error: 'not' requires a"
" subcommand")
else:
break
# Handle in-process builtins.
#
# Handle "echo" as a builtin if it is not part of a pipeline. This
# greatly speeds up tests that construct input files by repeatedly
# echo-appending to a file.
# FIXME: Standardize on the builtin echo implementation. We can use a
# temporary file to sidestep blocking pipe write issues.
inproc_builtin = inproc_builtins.get(args[0], None)
if inproc_builtin and (args[0] != 'echo' or len(cmd.commands) == 1):
# env calling an in-process builtin is useless, so we take the safe
# approach of complaining.
            if cmd_shenv is not shenv:
raise InternalShellError(j, "Error: 'env' cannot call '{}'"
.format(args[0]))
if not_crash:
raise InternalShellError(j, "Error: 'not --crash' cannot call"
" '{}'".format(args[0]))
if len(cmd.commands) != 1:
raise InternalShellError(j, "Unsupported: '{}' cannot be part"
" of a pipeline".format(args[0]))
result = inproc_builtin(Command(args, j.redirects), cmd_shenv)
if not_count % 2:
result.exitCode = int(not result.exitCode)
            result.command.args = j.args
results.append(result)
return result.exitCode
# Resolve any out-of-process builtin command before adding back 'not'
# commands.
if args[0] in builtin_commands:
args.insert(0, sys.executable)
cmd_shenv.env['PYTHONPATH'] = \
os.path.dirname(os.path.abspath(__file__))
args[1] = os.path.join(builtin_commands_dir, args[1] + ".py")
# We had to search through the 'not' commands to find all the 'env'
# commands and any other in-process builtin command. We don't want to
# reimplement 'not' and its '--crash' here, so just push all 'not'
# commands back to be called as external commands. Because this
# approach effectively moves all 'env' commands up front, it relies on
# the assumptions that (1) environment variables are not intended to be
# relevant to 'not' commands and (2) the 'env' command should always
# blindly pass along the status it receives from any command it calls.
args = not_args + args
stdin, stdout, stderr = processRedirects(j, default_stdin, cmd_shenv,
opened_files)
# If stderr wants to come from stdout, but stdout isn't a pipe, then put
# stderr on a pipe and treat it as stdout.
if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
stderr = subprocess.PIPE
stderrIsStdout = True
else:
stderrIsStdout = False
# Don't allow stderr on a PIPE except for the last
# process, this could deadlock.
#
# FIXME: This is slow, but so is deadlock.
if stderr == subprocess.PIPE and j != cmd.commands[-1]:
stderr = tempfile.TemporaryFile(mode='w+b')
stderrTempFiles.append((i, stderr))
# Resolve the executable path ourselves.
executable = None
# For paths relative to cwd, use the cwd of the shell environment.
if args[0].startswith('.'):
exe_in_cwd = os.path.join(cmd_shenv.cwd, args[0])
if os.path.isfile(exe_in_cwd):
executable = exe_in_cwd
if not executable:
executable = lit.util.which(args[0], cmd_shenv.env['PATH'])
if not executable:
raise InternalShellError(j, '%r: command not found' % args[0])
# Replace uses of /dev/null with temporary files.
if kAvoidDevNull:
            # In Python 2.x, basestring is the base class for all strings (including unicode).
            # In Python 3.x, basestring no longer exists and str is always unicode.
try:
str_type = basestring
except NameError:
str_type = str
            # Use a distinct loop variable so we do not clobber the index of
            # the enclosing loop over cmd.commands.
            for arg_idx, arg in enumerate(args):
                if isinstance(arg, str_type) and kDevNull in arg:
                    f = tempfile.NamedTemporaryFile(delete=False)
                    f.close()
                    named_temp_files.append(f.name)
                    args[arg_idx] = arg.replace(kDevNull, f.name)
# Expand all glob expressions
args = expand_glob_expressions(args, cmd_shenv.cwd)
# On Windows, do our own command line quoting for better compatibility
# with some core utility distributions.
if kIsWindows:
args = quote_windows_command(args)
try:
procs.append(subprocess.Popen(args, cwd=cmd_shenv.cwd,
executable = executable,
stdin = stdin,
stdout = stdout,
stderr = stderr,
env = cmd_shenv.env,
close_fds = kUseCloseFDs))
# Let the helper know about this process
timeoutHelper.addProcess(procs[-1])
except OSError as e:
raise InternalShellError(j, 'Could not create process ({}) due to {}'.format(executable, e))
# Immediately close stdin for any process taking stdin from us.
if stdin == subprocess.PIPE:
procs[-1].stdin.close()
procs[-1].stdin = None
# Update the current stdin source.
if stdout == subprocess.PIPE:
default_stdin = procs[-1].stdout
elif stderrIsStdout:
default_stdin = procs[-1].stderr
else:
default_stdin = subprocess.PIPE
# Explicitly close any redirected files. We need to do this now because we
# need to release any handles we may have on the temporary files (important
# on Win32, for example). Since we have already spawned the subprocess, our
# handles have already been transferred so we do not need them anymore.
for (name, mode, f, path) in opened_files:
f.close()
# FIXME: There is probably still deadlock potential here. Yawn.
procData = [None] * len(procs)
procData[-1] = procs[-1].communicate()
for i in range(len(procs) - 1):
if procs[i].stdout is not None:
out = procs[i].stdout.read()
else:
out = ''
if procs[i].stderr is not None:
err = procs[i].stderr.read()
else:
err = ''
procData[i] = (out,err)
# Read stderr out of the temp files.
for i,f in stderrTempFiles:
f.seek(0, 0)
procData[i] = (procData[i][0], f.read())
f.close()
exitCode = None
for i,(out,err) in enumerate(procData):
res = procs[i].wait()
# Detect Ctrl-C in subprocess.
if res == -signal.SIGINT:
raise KeyboardInterrupt
# Ensure the resulting output is always of string type.
try:
if out is None:
out = ''
else:
out = to_string(out.decode('utf-8', errors='replace'))
except:
out = str(out)
try:
if err is None:
err = ''
else:
err = to_string(err.decode('utf-8', errors='replace'))
except:
err = str(err)
# Gather the redirected output files for failed commands.
output_files = []
if res != 0:
for (name, mode, f, path) in sorted(opened_files):
if path is not None and mode in ('w', 'a'):
try:
with open(path, 'rb') as f:
data = f.read()
except:
data = None
if data is not None:
output_files.append((name, path, data))
results.append(ShellCommandResult(
cmd.commands[i], out, err, res, timeoutHelper.timeoutReached(),
output_files))
if cmd.pipe_err:
# Take the last failing exit code from the pipeline.
if not exitCode or res != 0:
exitCode = res
else:
exitCode = res
# Remove any named temporary files we created.
for f in named_temp_files:
try:
os.remove(f)
except OSError:
pass
if cmd.negate:
exitCode = not exitCode
return exitCode
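# Worked example (illustrative): for the parsed tree of `a && b || c`, the
# Seq handling above runs `a` first; `b` runs only if `a` exited 0, and `c`
# runs only if the `a && b` half produced a non-zero code, mirroring POSIX
# shell short-circuit semantics. A `not` prefix is re-attached to the argv
# and executed via the external `not` tool rather than reimplemented here.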
def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
cmds = []
for i, ln in enumerate(commands):
ln = commands[i] = re.sub(kPdbgRegex, ": '\\1'; ", ln)
try:
cmds.append(ShUtil.ShParser(ln, litConfig.isWindows,
test.config.pipefail).parse())
except:
return lit.Test.Result(Test.FAIL, "shell parser error on: %r" % ln)
cmd = cmds[0]
for c in cmds[1:]:
cmd = ShUtil.Seq(cmd, '&&', c)
results = []
timeoutInfo = None
try:
shenv = ShellEnvironment(cwd, test.config.environment)
exitCode, timeoutInfo = executeShCmd(cmd, shenv, results, timeout=litConfig.maxIndividualTestTime)
except InternalShellError:
e = sys.exc_info()[1]
exitCode = 127
results.append(
ShellCommandResult(e.command, '', e.message, exitCode, False))
out = err = ''
for i,result in enumerate(results):
# Write the command line run.
out += '$ %s\n' % (' '.join('"%s"' % s
for s in result.command.args),)
# If nothing interesting happened, move on.
if litConfig.maxIndividualTestTime == 0 and \
result.exitCode == 0 and \
not result.stdout.strip() and not result.stderr.strip():
continue
# Otherwise, something failed or was printed, show it.
# Add the command output, if redirected.
for (name, path, data) in result.outputFiles:
if data.strip():
out += "# redirected output from %r:\n" % (name,)
data = to_string(data.decode('utf-8', errors='replace'))
if len(data) > 1024:
out += data[:1024] + "\n...\n"
out += "note: data was truncated\n"
else:
out += data
out += "\n"
if result.stdout.strip():
out += '# command output:\n%s\n' % (result.stdout,)
if result.stderr.strip():
out += '# command stderr:\n%s\n' % (result.stderr,)
if not result.stdout.strip() and not result.stderr.strip():
out += "note: command had no output on stdout or stderr\n"
# Show the error conditions:
if result.exitCode != 0:
# On Windows, a negative exit code indicates a signal, and those are
# easier to recognize or look up if we print them in hex.
if litConfig.isWindows and result.exitCode < 0:
codeStr = hex(int(result.exitCode & 0xFFFFFFFF)).rstrip("L")
else:
codeStr = str(result.exitCode)
out += "error: command failed with exit status: %s\n" % (
codeStr,)
if litConfig.maxIndividualTestTime > 0 and result.timeoutReached:
out += 'error: command reached timeout: %s\n' % (
str(result.timeoutReached),)
return out, err, exitCode, timeoutInfo
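# Example of the kPdbgRegex rewriting above (illustrative): a script line
#   %dbg(RUN: at line 4) echo hello
# is rewritten to roughly
#   : 'RUN: at line 4'; echo hello
# so the original RUN-line location survives as a no-op `:` command in logs.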
def executeScript(test, litConfig, tmpBase, commands, cwd):
bashPath = litConfig.getBashPath()
isWin32CMDEXE = (litConfig.isWindows and not bashPath)
script = tmpBase + '.script'
if isWin32CMDEXE:
script += '.bat'
# Write script file
mode = 'w'
open_kwargs = {}
if litConfig.isWindows and not isWin32CMDEXE:
mode += 'b' # Avoid CRLFs when writing bash scripts.
elif sys.version_info > (3,0):
open_kwargs['encoding'] = 'utf-8'
f = open(script, mode, **open_kwargs)
if isWin32CMDEXE:
for i, ln in enumerate(commands):
commands[i] = re.sub(kPdbgRegex, "echo '\\1' > nul && ", ln)
if litConfig.echo_all_commands:
f.write('@echo on\n')
else:
f.write('@echo off\n')
f.write('\n@if %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
else:
for i, ln in enumerate(commands):
commands[i] = re.sub(kPdbgRegex, ": '\\1'; ", ln)
if test.config.pipefail:
f.write(b'set -o pipefail;' if mode == 'wb' else 'set -o pipefail;')
if litConfig.echo_all_commands:
f.write(b'set -x;' if mode == 'wb' else 'set -x;')
if sys.version_info > (3,0) and mode == 'wb':
f.write(bytes('{ ' + '; } &&\n{ '.join(commands) + '; }', 'utf-8'))
else:
f.write('{ ' + '; } &&\n{ '.join(commands) + '; }')
f.write(b'\n' if mode == 'wb' else '\n')
f.close()
if isWin32CMDEXE:
command = ['cmd','/c', script]
else:
if bashPath:
command = [bashPath, script]
else:
command = ['/bin/sh', script]
if litConfig.useValgrind:
# FIXME: Running valgrind on sh is overkill. We probably could just
# run on clang with no real loss.
command = litConfig.valgrindArgs + command
try:
out, err, exitCode = lit.util.executeCommand(command, cwd=cwd,
env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
return (out, err, exitCode, None)
except lit.util.ExecuteCommandTimeoutException as e:
return (e.out, e.err, e.exitCode, e.msg)
def parseIntegratedTestScriptCommands(source_path, keywords):
"""
    parseIntegratedTestScriptCommands(source_path, keywords) -> commands
Parse the commands in an integrated test script file into a list of
(line_number, command_type, line).
"""
# This code is carefully written to be dual compatible with Python 2.5+ and
# Python 3 without requiring input files to always have valid codings. The
# trick we use is to open the file in binary mode and use the regular
# expression library to find the commands, with it scanning strings in
# Python2 and bytes in Python3.
#
# Once we find a match, we do require each script line to be decodable to
# UTF-8, so we convert the outputs to UTF-8 before returning. This way the
# remaining code can work with "strings" agnostic of the executing Python
# version.
keywords_re = re.compile(
to_bytes("(%s)(.*)\n" % ("|".join(re.escape(k) for k in keywords),)))
f = open(source_path, 'rb')
try:
# Read the entire file contents.
data = f.read()
# Ensure the data ends with a newline.
if not data.endswith(to_bytes('\n')):
data = data + to_bytes('\n')
# Iterate over the matches.
line_number = 1
last_match_position = 0
for match in keywords_re.finditer(data):
# Compute the updated line number by counting the intervening
# newlines.
match_position = match.start()
line_number += data.count(to_bytes('\n'), last_match_position,
match_position)
last_match_position = match_position
# Convert the keyword and line to UTF-8 strings and yield the
# command. Note that we take care to return regular strings in
# Python 2, to avoid other code having to differentiate between the
# str and unicode types.
#
# Opening the file in binary mode prevented Windows \r newline
# characters from being converted to Unix \n newlines, so manually
# strip those from the yielded lines.
keyword,ln = match.groups()
yield (line_number, to_string(keyword.decode('utf-8')),
to_string(ln.decode('utf-8').rstrip('\r')))
finally:
f.close()
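# Illustrative usage (hypothetical file contents): for a test file whose
# second line is "; RUN: echo hi", calling
#   parseIntegratedTestScriptCommands(path, ['RUN:', 'XFAIL:'])
# yields (2, 'RUN:', ' echo hi'). The keyword may appear anywhere in a line,
# and the remainder of that line is returned as the command text.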
def getTempPaths(test):
"""Get the temporary location, this is always relative to the test suite
root, not test source root."""
execpath = test.getExecPath()
execdir,execbase = os.path.split(execpath)
tmpDir = os.path.join(execdir, 'Output')
tmpBase = os.path.join(tmpDir, execbase)
return tmpDir, tmpBase
def colonNormalizePath(path):
if kIsWindows:
return re.sub(r'^(.):', r'\1', path.replace('\\', '/'))
else:
assert path[0] == '/'
return path[1:]
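# Examples (illustrative): on Windows, colonNormalizePath('C:\\a\\b') returns
# 'C/a/b'; on POSIX, colonNormalizePath('/a/b') returns 'a/b'. Both drop the
# path root so the result can be safely appended to another directory.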
def getDefaultSubstitutions(test, tmpDir, tmpBase, normalize_slashes=False):
sourcepath = test.getSourcePath()
sourcedir = os.path.dirname(sourcepath)
# Normalize slashes, if requested.
if normalize_slashes:
sourcepath = sourcepath.replace('\\', '/')
sourcedir = sourcedir.replace('\\', '/')
tmpDir = tmpDir.replace('\\', '/')
tmpBase = tmpBase.replace('\\', '/')
substitutions = []
substitutions.extend(test.config.substitutions)
tmpName = tmpBase + '.tmp'
baseName = os.path.basename(tmpBase)
substitutions.extend([('%s', sourcepath),
('%S', sourcedir),
('%p', sourcedir),
('%{pathsep}', os.pathsep),
('%t', tmpName),
('%basename_t', baseName),
('%T', tmpDir)])
# "%/[STpst]" should be normalized.
substitutions.extend([
('%/s', sourcepath.replace('\\', '/')),
('%/S', sourcedir.replace('\\', '/')),
('%/p', sourcedir.replace('\\', '/')),
('%/t', tmpBase.replace('\\', '/') + '.tmp'),
('%/T', tmpDir.replace('\\', '/')),
])
# "%{/[STpst]:regex_replacement}" should be normalized like "%/[STpst]" but we're
# also in a regex replacement context of a s@@@ regex.
def regex_escape(s):
s = s.replace('@', r'\@')
s = s.replace('&', r'\&')
return s
substitutions.extend([
('%{/s:regex_replacement}',
regex_escape(sourcepath.replace('\\', '/'))),
('%{/S:regex_replacement}',
regex_escape(sourcedir.replace('\\', '/'))),
('%{/p:regex_replacement}',
regex_escape(sourcedir.replace('\\', '/'))),
('%{/t:regex_replacement}',
regex_escape(tmpBase.replace('\\', '/')) + '.tmp'),
('%{/T:regex_replacement}',
regex_escape(tmpDir.replace('\\', '/'))),
])
# "%:[STpst]" are normalized paths without colons and without a leading
# slash.
substitutions.extend([
('%:s', colonNormalizePath(sourcepath)),
('%:S', colonNormalizePath(sourcedir)),
('%:p', colonNormalizePath(sourcedir)),
('%:t', colonNormalizePath(tmpBase + '.tmp')),
('%:T', colonNormalizePath(tmpDir)),
])
return substitutions
def _memoize(f):
cache = {} # Intentionally unbounded, see applySubstitutions()
def memoized(x):
if x not in cache:
cache[x] = f(x)
return cache[x]
return memoized
@_memoize
def _caching_re_compile(r):
return re.compile(r)
def applySubstitutions(script, substitutions, recursion_limit=None):
"""
Apply substitutions to the script. Allow full regular expression syntax.
Replace each matching occurrence of regular expression pattern a with
substitution b in line ln.
If a substitution expands into another substitution, it is expanded
recursively until the line has no more expandable substitutions. If
    the line can still be substituted after being substituted
`recursion_limit` times, it is an error. If the `recursion_limit` is
`None` (the default), no recursive substitution is performed at all.
"""
# We use #_MARKER_# to hide %% while we do the other substitutions.
def escape(ln):
return _caching_re_compile('%%').sub('#_MARKER_#', ln)
def unescape(ln):
return _caching_re_compile('#_MARKER_#').sub('%', ln)
def processLine(ln):
# Apply substitutions
for a,b in substitutions:
if kIsWindows:
b = b.replace("\\","\\\\")
# re.compile() has a built-in LRU cache with 512 entries. In some
# test suites lit ends up thrashing that cache, which made e.g.
# check-llvm run 50% slower. Use an explicit, unbounded cache
# to prevent that from happening. Since lit is fairly
# short-lived, since the set of substitutions is fairly small, and
# since thrashing has such bad consequences, not bounding the cache
# seems reasonable.
ln = _caching_re_compile(a).sub(str(b), escape(ln))
# Strip the trailing newline and any extra whitespace.
return ln.strip()
def processLineToFixedPoint(ln):
assert isinstance(recursion_limit, int) and recursion_limit >= 0
origLine = ln
steps = 0
processed = processLine(ln)
while processed != ln and steps < recursion_limit:
ln = processed
processed = processLine(ln)
steps += 1
if processed != ln:
raise ValueError("Recursive substitution of '%s' did not complete "
"in the provided recursion limit (%s)" % \
(origLine, recursion_limit))
return processed
process = processLine if recursion_limit is None else processLineToFixedPoint
return [unescape(process(ln)) for ln in script]
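# Illustrative example (hypothetical substitution list): with
#   substitutions = [('%t', '/tmp/out')]
# applySubstitutions(['clang %t.c -o %t'], substitutions) returns
# ['clang /tmp/out.c -o /tmp/out']; a literal '%%' in the script survives as
# a single '%' thanks to the #_MARKER_# escaping above.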
class ParserKind(object):
"""
An enumeration representing the style of an integrated test keyword or
command.
TAG: A keyword taking no value. Ex 'END.'
COMMAND: A keyword taking a list of shell commands. Ex 'RUN:'
LIST: A keyword taking a comma-separated list of values.
BOOLEAN_EXPR: A keyword taking a comma-separated list of
boolean expressions. Ex 'XFAIL:'
INTEGER: A keyword taking a single integer. Ex 'ALLOW_RETRIES:'
CUSTOM: A keyword with custom parsing semantics.
"""
TAG = 0
COMMAND = 1
LIST = 2
BOOLEAN_EXPR = 3
INTEGER = 4
CUSTOM = 5
@staticmethod
def allowedKeywordSuffixes(value):
return { ParserKind.TAG: ['.'],
ParserKind.COMMAND: [':'],
ParserKind.LIST: [':'],
ParserKind.BOOLEAN_EXPR: [':'],
ParserKind.INTEGER: [':'],
ParserKind.CUSTOM: [':', '.']
} [value]
@staticmethod
def str(value):
return { ParserKind.TAG: 'TAG',
ParserKind.COMMAND: 'COMMAND',
ParserKind.LIST: 'LIST',
ParserKind.BOOLEAN_EXPR: 'BOOLEAN_EXPR',
ParserKind.INTEGER: 'INTEGER',
ParserKind.CUSTOM: 'CUSTOM'
} [value]
class IntegratedTestKeywordParser(object):
"""A parser for LLVM/Clang style integrated test scripts.
keyword: The keyword to parse for. It must end in either '.' or ':'.
        kind: A value of ParserKind.
parser: A custom parser. This value may only be specified with
ParserKind.CUSTOM.
"""
def __init__(self, keyword, kind, parser=None, initial_value=None):
allowedSuffixes = ParserKind.allowedKeywordSuffixes(kind)
if len(keyword) == 0 or keyword[-1] not in allowedSuffixes:
if len(allowedSuffixes) == 1:
raise ValueError("Keyword '%s' of kind '%s' must end in '%s'"
% (keyword, ParserKind.str(kind),
allowedSuffixes[0]))
else:
raise ValueError("Keyword '%s' of kind '%s' must end in "
" one of '%s'"
% (keyword, ParserKind.str(kind),
' '.join(allowedSuffixes)))
if parser is not None and kind != ParserKind.CUSTOM:
raise ValueError("custom parsers can only be specified with "
"ParserKind.CUSTOM")
self.keyword = keyword
self.kind = kind
self.parsed_lines = []
self.value = initial_value
self.parser = parser
if kind == ParserKind.COMMAND:
self.parser = lambda line_number, line, output: \
self._handleCommand(line_number, line, output,
self.keyword)
elif kind == ParserKind.LIST:
self.parser = self._handleList
elif kind == ParserKind.BOOLEAN_EXPR:
self.parser = self._handleBooleanExpr
elif kind == ParserKind.INTEGER:
self.parser = self._handleSingleInteger
elif kind == ParserKind.TAG:
self.parser = self._handleTag
elif kind == ParserKind.CUSTOM:
if parser is None:
raise ValueError("ParserKind.CUSTOM requires a custom parser")
self.parser = parser
else:
raise ValueError("Unknown kind '%s'" % kind)
def parseLine(self, line_number, line):
try:
self.parsed_lines += [(line_number, line)]
self.value = self.parser(line_number, line, self.value)
except ValueError as e:
raise ValueError(str(e) + ("\nin %s directive on test line %d" %
(self.keyword, line_number)))
def getValue(self):
return self.value
@staticmethod
def _handleTag(line_number, line, output):
"""A helper for parsing TAG type keywords"""
return (not line.strip() or output)
@staticmethod
def _handleCommand(line_number, line, output, keyword):
"""A helper for parsing COMMAND type keywords"""
# Trim trailing whitespace.
line = line.rstrip()
# Substitute line number expressions
line = re.sub(r'%\(line\)', str(line_number), line)
def replace_line_number(match):
if match.group(1) == '+':
return str(line_number + int(match.group(2)))
if match.group(1) == '-':
return str(line_number - int(match.group(2)))
line = re.sub(r'%\(line *([\+-]) *(\d+)\)', replace_line_number, line)
# Collapse lines with trailing '\\'.
if output and output[-1][-1] == '\\':
output[-1] = output[-1][:-1] + line
else:
if output is None:
output = []
pdbg = "%dbg({keyword} at line {line_number})".format(
keyword=keyword,
line_number=line_number)
assert re.match(kPdbgRegex + "$", pdbg), \
"kPdbgRegex expected to match actual %dbg usage"
line = "{pdbg} {real_command}".format(
pdbg=pdbg,
real_command=line)
output.append(line)
return output
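    # Example of the expansions above (illustrative): on test line 10, the
    # directive text 'FileCheck %s -DLINE=%(line+2)' becomes
    # 'FileCheck %s -DLINE=12', and the whole command is prefixed with
    # '%dbg(RUN: at line 10)' so failures can be traced to the source line.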
@staticmethod
def _handleList(line_number, line, output):
"""A parser for LIST type keywords"""
if output is None:
output = []
output.extend([s.strip() for s in line.split(',')])
return output
@staticmethod
def _handleSingleInteger(line_number, line, output):
"""A parser for INTEGER type keywords"""
if output is None:
output = []
try:
n = int(line)
except ValueError:
raise ValueError("INTEGER parser requires the input to be an integer (got {})".format(line))
output.append(n)
return output
@staticmethod
def _handleBooleanExpr(line_number, line, output):
"""A parser for BOOLEAN_EXPR type keywords"""
parts = [s.strip() for s in line.split(',') if s.strip() != '']
if output and output[-1][-1] == '\\':
output[-1] = output[-1][:-1] + parts[0]
del parts[0]
if output is None:
output = []
output.extend(parts)
# Evaluate each expression to verify syntax.
# We don't want any results, just the raised ValueError.
for s in output:
if s != '*' and not s.endswith('\\'):
BooleanExpression.evaluate(s, [])
return output
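# Example of the continuation handling above (illustrative): the directives
#   XFAIL: foo, bar && \
#   XFAIL: baz
# accumulate to ['foo', 'bar && baz']; the trailing '\' splices the first
# expression of the following line onto the unterminated one.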
def _parseKeywords(sourcepath, additional_parsers=[],
require_script=True):
"""_parseKeywords
Scan an LLVM/Clang style integrated test script and extract all the lines
pertaining to a special parser. This includes 'RUN', 'XFAIL', 'REQUIRES',
'UNSUPPORTED' and 'ALLOW_RETRIES', as well as other specified custom
parsers.
Returns a dictionary mapping each custom parser to its value after
parsing the test.
"""
# Install the built-in keyword parsers.
script = []
builtin_parsers = [
IntegratedTestKeywordParser('RUN:', ParserKind.COMMAND, initial_value=script),
IntegratedTestKeywordParser('XFAIL:', ParserKind.BOOLEAN_EXPR),
IntegratedTestKeywordParser('REQUIRES:', ParserKind.BOOLEAN_EXPR),
IntegratedTestKeywordParser('UNSUPPORTED:', ParserKind.BOOLEAN_EXPR),
IntegratedTestKeywordParser('ALLOW_RETRIES:', ParserKind.INTEGER),
IntegratedTestKeywordParser('END.', ParserKind.TAG)
]
keyword_parsers = {p.keyword: p for p in builtin_parsers}
# Install user-defined additional parsers.
for parser in additional_parsers:
if not isinstance(parser, IntegratedTestKeywordParser):
raise ValueError('Additional parser must be an instance of '
'IntegratedTestKeywordParser')
if parser.keyword in keyword_parsers:
raise ValueError("Parser for keyword '%s' already exists"
% parser.keyword)
keyword_parsers[parser.keyword] = parser
# Collect the test lines from the script.
for line_number, command_type, ln in \
parseIntegratedTestScriptCommands(sourcepath,
keyword_parsers.keys()):
parser = keyword_parsers[command_type]
parser.parseLine(line_number, ln)
if command_type == 'END.' and parser.getValue() is True:
break
# Verify the script contains a run line.
if require_script and not script:
raise ValueError("Test has no 'RUN:' line")
# Check for unterminated run lines.
if script and script[-1][-1] == '\\':
raise ValueError("Test has unterminated 'RUN:' lines (with '\\')")
# Check boolean expressions for unterminated lines.
for key in keyword_parsers:
kp = keyword_parsers[key]
if kp.kind != ParserKind.BOOLEAN_EXPR:
continue
value = kp.getValue()
if value and value[-1][-1] == '\\':
raise ValueError("Test has unterminated '{key}' lines (with '\\')"
.format(key=key))
# Make sure there's at most one ALLOW_RETRIES: line
allowed_retries = keyword_parsers['ALLOW_RETRIES:'].getValue()
if allowed_retries and len(allowed_retries) > 1:
raise ValueError("Test has more than one ALLOW_RETRIES lines")
return {p.keyword: p.getValue() for p in keyword_parsers.values()}
def parseIntegratedTestScript(test, additional_parsers=[],
require_script=True):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
script and extract the lines to 'RUN' as well as 'XFAIL', 'REQUIRES',
'UNSUPPORTED' and 'ALLOW_RETRIES' information into the given test.
If additional parsers are specified then the test is also scanned for the
keywords they specify and all matches are passed to the custom parser.
If 'require_script' is False an empty script
may be returned. This can be used for test formats where the actual script
is optional or ignored.
"""
# Parse the test sources and extract test properties
try:
parsed = _parseKeywords(test.getSourcePath(), additional_parsers,
require_script)
except ValueError as e:
return lit.Test.Result(Test.UNRESOLVED, str(e))
script = parsed['RUN:'] or []
test.xfails += parsed['XFAIL:'] or []
test.requires += parsed['REQUIRES:'] or []
test.unsupported += parsed['UNSUPPORTED:'] or []
if parsed['ALLOW_RETRIES:']:
test.allowed_retries = parsed['ALLOW_RETRIES:'][0]
# Enforce REQUIRES:
missing_required_features = test.getMissingRequiredFeatures()
if missing_required_features:
msg = ', '.join(missing_required_features)
return lit.Test.Result(Test.UNSUPPORTED,
"Test requires the following unavailable "
"features: %s" % msg)
# Enforce UNSUPPORTED:
unsupported_features = test.getUnsupportedFeatures()
if unsupported_features:
msg = ', '.join(unsupported_features)
return lit.Test.Result(
Test.UNSUPPORTED,
"Test does not support the following features "
"and/or targets: %s" % msg)
# Enforce limit_to_features.
if not test.isWithinFeatureLimits():
msg = ', '.join(test.config.limit_to_features)
return lit.Test.Result(Test.UNSUPPORTED,
"Test does not require any of the features "
"specified in limit_to_features: %s" % msg)
return script
def _runShTest(test, litConfig, useExternalSh, script, tmpBase):
def runOnce(execdir):
if useExternalSh:
res = executeScript(test, litConfig, tmpBase, script, execdir)
else:
res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
if isinstance(res, lit.Test.Result):
return res
out,err,exitCode,timeoutInfo = res
if exitCode == 0:
status = Test.PASS
else:
if timeoutInfo is None:
status = Test.FAIL
else:
status = Test.TIMEOUT
return out,err,exitCode,timeoutInfo,status
# Create the output directory if it does not already exist.
lit.util.mkdir_p(os.path.dirname(tmpBase))
# Re-run failed tests up to test.allowed_retries times.
execdir = os.path.dirname(test.getExecPath())
attempts = test.allowed_retries + 1
for i in range(attempts):
res = runOnce(execdir)
if isinstance(res, lit.Test.Result):
return res
out,err,exitCode,timeoutInfo,status = res
if status != Test.FAIL:
break
# If we had to run the test more than once, count it as a flaky pass. These
# will be printed separately in the test summary.
if i > 0 and status == Test.PASS:
status = Test.FLAKYPASS
# Form the output log.
output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % (
'\n'.join(script), exitCode)
if timeoutInfo is not None:
output += """Timeout: %s\n""" % (timeoutInfo,)
output += "\n"
# Append the outputs, if present.
if out:
output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
if err:
output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)
return lit.Test.Result(status, output)
def executeShTest(test, litConfig, useExternalSh,
extra_substitutions=[],
preamble_commands=[]):
if test.config.unsupported:
return lit.Test.Result(Test.UNSUPPORTED, 'Test is unsupported')
script = list(preamble_commands)
parsed = parseIntegratedTestScript(test, require_script=not script)
if isinstance(parsed, lit.Test.Result):
return parsed
script += parsed
if litConfig.noExecute:
return lit.Test.Result(Test.PASS)
tmpDir, tmpBase = getTempPaths(test)
substitutions = list(extra_substitutions)
substitutions += getDefaultSubstitutions(test, tmpDir, tmpBase,
normalize_slashes=useExternalSh)
script = applySubstitutions(script, substitutions,
recursion_limit=test.config.recursiveExpansionLimit)
return _runShTest(test, litConfig, useExternalSh, script, tmpBase)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/TestRunner.py |
from __future__ import absolute_import
import inspect
import os
import platform
import sys
import lit.Test
import lit.formats
import lit.TestingConfig
import lit.util
# LitConfig must be a new style class for properties to work
class LitConfig(object):
"""LitConfig - Configuration data for a 'lit' test runner instance, shared
across all tests.
The LitConfig object is also used to communicate with client configuration
files, it is always passed in as the global variable 'lit' so that
configuration files can access common functionality and internal components
easily.
"""
def __init__(self, progname, path, quiet,
useValgrind, valgrindLeakCheck, valgrindArgs,
noExecute, debug, isWindows,
params, config_prefix = None,
maxIndividualTestTime = 0,
parallelism_groups = {},
echo_all_commands = False):
# The name of the test runner.
self.progname = progname
# The items to add to the PATH environment variable.
self.path = [str(p) for p in path]
self.quiet = bool(quiet)
self.useValgrind = bool(useValgrind)
self.valgrindLeakCheck = bool(valgrindLeakCheck)
self.valgrindUserArgs = list(valgrindArgs)
self.noExecute = noExecute
self.debug = debug
self.isWindows = bool(isWindows)
self.params = dict(params)
self.bashPath = None
# Configuration files to look for when discovering test suites.
self.config_prefix = config_prefix or 'lit'
self.suffixes = ['cfg.py', 'cfg']
self.config_names = ['%s.%s' % (self.config_prefix,x) for x in self.suffixes]
self.site_config_names = ['%s.site.%s' % (self.config_prefix,x) for x in self.suffixes]
self.local_config_names = ['%s.local.%s' % (self.config_prefix,x) for x in self.suffixes]
self.numErrors = 0
self.numWarnings = 0
self.valgrindArgs = []
if self.useValgrind:
self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no',
'--tool=memcheck', '--trace-children=yes',
'--error-exitcode=123']
if self.valgrindLeakCheck:
self.valgrindArgs.append('--leak-check=full')
else:
# The default is 'summary'.
self.valgrindArgs.append('--leak-check=no')
self.valgrindArgs.extend(self.valgrindUserArgs)
self.maxIndividualTestTime = maxIndividualTestTime
self.parallelism_groups = parallelism_groups
self.echo_all_commands = echo_all_commands
@property
def maxIndividualTestTime(self):
"""
Interface for getting maximum time to spend executing
a single test
"""
return self._maxIndividualTestTime
@property
def maxIndividualTestTimeIsSupported(self):
"""
        Returns a tuple (<supported>, <error message>), where `<supported>`
        is True if setting maxIndividualTestTime is supported on the current
        host, and False otherwise. `<error message>` is an empty string if
        `<supported>` is True; otherwise it contains a string describing why
        setting maxIndividualTestTime is not supported.
"""
return lit.util.killProcessAndChildrenIsSupported()
@maxIndividualTestTime.setter
def maxIndividualTestTime(self, value):
"""
Interface for setting maximum time to spend executing
a single test
"""
if not isinstance(value, int):
            self.fatal('maxIndividualTestTime must be set to a value of type int.')
self._maxIndividualTestTime = value
if self.maxIndividualTestTime > 0:
# The current implementation needs psutil on some platforms to set
# a timeout per test. Check it's available.
# See lit.util.killProcessAndChildren()
supported, errormsg = self.maxIndividualTestTimeIsSupported
if not supported:
self.fatal('Setting a timeout per test not supported. ' +
errormsg)
elif self.maxIndividualTestTime < 0:
self.fatal('The timeout per test must be >= 0 seconds')
def load_config(self, config, path):
"""load_config(config, path) - Load a config object from an alternate
path."""
if self.debug:
self.note('load_config from %r' % path)
config.load_from_path(path, self)
return config
def getBashPath(self):
"""getBashPath - Get the path to 'bash'"""
if self.bashPath is not None:
return self.bashPath
self.bashPath = lit.util.which('bash', os.pathsep.join(self.path))
if self.bashPath is None:
self.bashPath = lit.util.which('bash')
if self.bashPath is None:
self.bashPath = ''
# Check whether the found version of bash is able to cope with paths in
# the host path format. If not, don't return it as it can't be used to
# run scripts. For example, WSL's bash.exe requires '/mnt/c/foo' rather
# than 'C:\\foo' or 'C:/foo'.
if self.isWindows and self.bashPath:
command = [self.bashPath, '-c',
'[[ -f "%s" ]]' % self.bashPath.replace('\\', '\\\\')]
_, _, exitCode = lit.util.executeCommand(command)
if exitCode:
self.note('bash command failed: %s' % (
' '.join('"%s"' % c for c in command)))
self.bashPath = ''
if not self.bashPath:
self.warning('Unable to find a usable version of bash.')
return self.bashPath
def getToolsPath(self, dir, paths, tools):
if dir is not None and os.path.isabs(dir) and os.path.isdir(dir):
if not lit.util.checkToolsPath(dir, tools):
return None
else:
dir = lit.util.whichTools(tools, paths)
# bash
self.bashPath = lit.util.which('bash', dir)
if self.bashPath is None:
self.bashPath = ''
return dir
def _write_message(self, kind, message):
# Get the file/line where this message was generated.
f = inspect.currentframe()
# Step out of _write_message, and then out of wrapper.
f = f.f_back.f_back
file = os.path.abspath(inspect.getsourcefile(f))
line = inspect.getlineno(f)
sys.stderr.write('%s: %s:%d: %s: %s\n' % (self.progname, file, line,
kind, message))
def note(self, message):
if not self.quiet:
self._write_message('note', message)
def warning(self, message):
if not self.quiet:
self._write_message('warning', message)
self.numWarnings += 1
def error(self, message):
self._write_message('error', message)
self.numErrors += 1
def fatal(self, message):
self._write_message('fatal', message)
sys.exit(2)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/LitConfig.py |
from __future__ import absolute_import
import lit.TestRunner
import lit.util
from .base import FileBasedTest
class ShTest(FileBasedTest):
"""ShTest is a format with one file per test.
This is the primary format for regression tests as described in the LLVM
testing guide:
http://llvm.org/docs/TestingGuide.html
The ShTest files contain some number of shell-like command pipelines, along
with assertions about what should be in the output.
"""
def __init__(self, execute_external=False, extra_substitutions=[],
preamble_commands=[]):
self.execute_external = execute_external
self.extra_substitutions = extra_substitutions
self.preamble_commands = preamble_commands
def execute(self, test, litConfig):
return lit.TestRunner.executeShTest(test, litConfig,
self.execute_external,
self.extra_substitutions,
self.preamble_commands)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/formats/shtest.py |
from lit.formats.base import ( # noqa: F401
TestFormat,
FileBasedTest,
OneCommandPerFileTest,
ExecutableTest
)
from lit.formats.googletest import GoogleTest # noqa: F401
from lit.formats.shtest import ShTest # noqa: F401
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/formats/__init__.py |
from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from .base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
class GoogleTest(TestFormat):
def __init__(self, test_sub_dirs, test_suffix):
self.test_sub_dirs = str(test_sub_dirs).split(';')
# On Windows, assume tests will also end in '.exe'.
exe_suffix = str(test_suffix)
if kIsWindows:
exe_suffix += '.exe'
# Also check for .py files for testing purposes.
self.test_suffixes = {exe_suffix, test_suffix + '.py'}
def getGTestTests(self, path, litConfig, localConfig):
"""getGTestTests(path) - [name]
Return the tests available in gtest executable.
Args:
path: String path to a gtest executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
list_test_cmd = self.maybeAddPythonToCmd([path, '--gtest_list_tests'])
try:
output = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-tests in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output))
# This doesn't look like a valid gtest file. This can
# have a number of causes, none of them good. For
# instance, we could have created a broken executable.
# Alternatively, someone has cruft in their test
# directory. If we don't return a test here, then no
# failures will get reported, so return a dummy test name
# so that the failure is reported later.
yield 'failed_to_discover_tests_from_gtest'
return
nested_tests = []
for ln in output.splitlines(False): # Don't keep newlines.
ln = lit.util.to_string(ln)
if 'Running main() from gtest_main.cc' in ln:
# Upstream googletest prints this to stdout prior to running
# tests. LLVM removed that print statement in r61540, but we
# handle it here in case upstream googletest is being used.
continue
# The test name list includes trailing comments beginning with
# a '#' on some lines, so skip those. We don't support test names
# that use escaping to embed '#' into their name as the names come
# from C++ class and method names where such things are hard and
# uninteresting to support.
ln = ln.split('#', 1)[0].rstrip()
if not ln.lstrip():
continue
index = 0
            while ln[index*2:index*2+2] == '  ':  # two spaces per nesting level
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
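    # Illustrative example: given --gtest_list_tests output such as
    #   FooTest.
    #     DoesBar
    #     DISABLED_DoesBaz
    # this generator yields only 'FooTest.DoesBar'; DISABLED_ tests are
    # skipped because gtest would not run them in a child process anyway.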
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
if not os.path.isdir(dir_path):
continue
for fn in lit.util.listdir_files(dir_path,
suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
testnames = self.getGTestTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(testSuite, testPath, localConfig,
file_path=execpath)
def execute(self, test, litConfig):
testPath,testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
# some '/'s.
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
cmd = [testPath, '--gtest_filter=' + testName]
cmd = self.maybeAddPythonToCmd(cmd)
if litConfig.useValgrind:
cmd = litConfig.valgrindArgs + cmd
if litConfig.noExecute:
return lit.Test.PASS, ''
try:
out, err, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime)
)
if exitCode:
return lit.Test.FAIL, out + err
passing_test_line = '[ PASSED ] 1 test.'
if passing_test_line not in out:
msg = ('Unable to find %r in gtest output:\n\n%s%s' %
(passing_test_line, out, err))
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS,''
def maybeAddPythonToCmd(self, cmd):
"""Insert the python exe into the command if cmd[0] ends in .py
We cannot rely on the system to interpret shebang lines for us on
Windows, so add the python executable to the command if this is a .py
script.
"""
if cmd[0].endswith('.py'):
return [sys.executable] + cmd
return cmd
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/formats/googletest.py |
from __future__ import absolute_import
import os
import lit.Test
import lit.util
class TestFormat(object):
pass
###
class FileBasedTest(TestFormat):
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
# Ignore dot files and excluded tests.
if (filename.startswith('.') or
filename in localConfig.excludes):
continue
filepath = os.path.join(source_path, filename)
if not os.path.isdir(filepath):
base,ext = os.path.splitext(filename)
if ext in localConfig.suffixes:
yield lit.Test.Test(testSuite, path_in_suite + (filename,),
localConfig)
###
import re
import tempfile
class OneCommandPerFileTest(TestFormat):
# FIXME: Refactor into generic test for running some command on a directory
# of inputs.
def __init__(self, command, dir, recursive=False,
pattern=".*", useTempInput=False):
if isinstance(command, str):
self.command = [command]
else:
self.command = list(command)
if dir is not None:
dir = str(dir)
self.dir = dir
self.recursive = bool(recursive)
self.pattern = re.compile(pattern)
self.useTempInput = useTempInput
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
dir = self.dir
if dir is None:
dir = testSuite.getSourcePath(path_in_suite)
for dirname,subdirs,filenames in os.walk(dir):
if not self.recursive:
subdirs[:] = []
subdirs[:] = [d for d in subdirs
if (d != '.svn' and
d not in localConfig.excludes)]
for filename in filenames:
if (filename.startswith('.') or
not self.pattern.match(filename) or
filename in localConfig.excludes):
continue
path = os.path.join(dirname,filename)
suffix = path[len(dir):]
if suffix.startswith(os.sep):
suffix = suffix[1:]
test = lit.Test.Test(
testSuite, path_in_suite + tuple(suffix.split(os.sep)),
localConfig)
# FIXME: Hack?
test.source_path = path
yield test
def createTempInput(self, tmp, test):
raise NotImplementedError('This is an abstract method.')
def execute(self, test, litConfig):
if test.config.unsupported:
return (lit.Test.UNSUPPORTED, 'Test is unsupported')
cmd = list(self.command)
# If using temp input, create a temporary file and hand it to the
# subclass.
if self.useTempInput:
tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
self.createTempInput(tmp, test)
tmp.flush()
cmd.append(tmp.name)
elif hasattr(test, 'source_path'):
cmd.append(test.source_path)
else:
cmd.append(test.getSourcePath())
out, err, exitCode = lit.util.executeCommand(cmd)
diags = out + err
if not exitCode and not diags.strip():
return lit.Test.PASS,''
# Try to include some useful information.
report = """Command: %s\n""" % ' '.join(["'%s'" % a
for a in cmd])
if self.useTempInput:
report += """Temporary File: %s\n""" % tmp.name
report += "--\n%s--\n""" % open(tmp.name).read()
report += """Output:\n--\n%s--""" % diags
return lit.Test.FAIL, report
###
# Check exit code of a simple executable with no input
class ExecutableTest(FileBasedTest):
def execute(self, test, litConfig):
if test.config.unsupported:
return lit.Test.UNSUPPORTED
out, err, exitCode = lit.util.executeCommand(test.getSourcePath())
if not exitCode:
return lit.Test.PASS, ''
return lit.Test.FAIL, out+err
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/formats/base.py |
import getopt
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def convertToCaretAndMNotation(data):
newdata = StringIO()
if isinstance(data, str):
data = bytearray(data)
for intval in data:
if intval == 9 or intval == 10:
newdata.write(chr(intval))
continue
if intval > 127:
            intval = intval - 128
newdata.write("M-")
if intval < 32:
newdata.write("^")
newdata.write(chr(intval+64))
elif intval == 127:
newdata.write("^?")
else:
newdata.write(chr(intval))
return newdata.getvalue().encode()
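# Examples of the notation above (illustrative): byte 0x01 is rendered as
# '^A', 0x7f as '^?', and 0x81 as 'M-^A'; tab (9) and newline (10) pass
# through unchanged, matching the usual `cat -v` conventions.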
def main(argv):
arguments = argv[1:]
short_options = "v"
long_options = ["show-nonprinting"]
    show_nonprinting = False
try:
options, filenames = getopt.gnu_getopt(arguments, short_options, long_options)
except getopt.GetoptError as err:
sys.stderr.write("Unsupported: 'cat': %s\n" % str(err))
sys.exit(1)
for option, value in options:
if option == "-v" or option == "--show-nonprinting":
            show_nonprinting = True
writer = getattr(sys.stdout, 'buffer', None)
if writer is None:
writer = sys.stdout
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(sys.stdout.fileno(),os.O_BINARY)
for filename in filenames:
try:
fileToCat = open(filename,"rb")
contents = fileToCat.read()
if show_nonprinting:
contents = convertToCaretAndMNotation(contents)
writer.write(contents)
sys.stdout.flush()
fileToCat.close()
except IOError as error:
sys.stderr.write(str(error))
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/builtin_commands/cat.py |
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/builtin_commands/__init__.py |
import difflib
import functools
import getopt
import io
import locale
import os
import sys
import util
from util import to_string
class DiffFlags():
def __init__(self):
self.ignore_all_space = False
self.ignore_space_change = False
self.unified_diff = False
self.num_context_lines = 3
self.recursive_diff = False
self.strip_trailing_cr = False
def getDirTree(path, basedir=""):
# Tree is a tuple of form (dirname, child_trees).
# An empty dir has child_trees = [], a file has child_trees = None.
child_trees = []
for dirname, child_dirs, files in os.walk(os.path.join(basedir, path)):
for child_dir in child_dirs:
child_trees.append(getDirTree(child_dir, dirname))
        for filename in files:
            child_trees.append((filename, None))
        # Stop after the top level of the walk: subdirectories are already
        # covered by the recursive getDirTree calls above.
        break
return path, sorted(child_trees)
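# Illustrative shape (hypothetical layout): for a directory `d` containing
# `a.txt` and a subdirectory `sub` holding `b.txt`, getDirTree('d') returns
#   ('d', [('a.txt', None), ('sub', [('b.txt', None)])])
# where files carry None and directories carry a sorted list of child trees.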
def compareTwoFiles(flags, filepaths):
filelines = []
for file in filepaths:
if file == "-":
stdin_fileno = sys.stdin.fileno()
with os.fdopen(os.dup(stdin_fileno), 'rb') as stdin_bin:
filelines.append(stdin_bin.readlines())
else:
with open(file, 'rb') as file_bin:
filelines.append(file_bin.readlines())
try:
return compareTwoTextFiles(flags, filepaths, filelines,
locale.getpreferredencoding(False))
except UnicodeDecodeError:
try:
return compareTwoTextFiles(flags, filepaths, filelines, "utf-8")
except:
return compareTwoBinaryFiles(flags, filepaths, filelines)
def compareTwoBinaryFiles(flags, filepaths, filelines):
exitCode = 0
if hasattr(difflib, 'diff_bytes'):
# python 3.5 or newer
diffs = difflib.diff_bytes(difflib.unified_diff, filelines[0],
filelines[1], filepaths[0].encode(),
filepaths[1].encode(),
n = flags.num_context_lines)
diffs = [diff.decode(errors="backslashreplace") for diff in diffs]
else:
# python 2.7
if flags.unified_diff:
func = difflib.unified_diff
else:
func = difflib.context_diff
diffs = func(filelines[0], filelines[1], filepaths[0], filepaths[1],
n = flags.num_context_lines)
for diff in diffs:
sys.stdout.write(to_string(diff))
exitCode = 1
return exitCode
def compareTwoTextFiles(flags, filepaths, filelines_bin, encoding):
filelines = []
for lines_bin in filelines_bin:
lines = []
for line_bin in lines_bin:
line = line_bin.decode(encoding=encoding)
lines.append(line)
filelines.append(lines)
exitCode = 0
def compose2(f, g):
return lambda x: f(g(x))
f = lambda x: x
if flags.strip_trailing_cr:
f = compose2(lambda line: line.replace('\r\n', '\n'), f)
if flags.ignore_all_space or flags.ignore_space_change:
ignoreSpace = lambda line, separator: \
separator.join(line.split()) + "\n"
ignoreAllSpaceOrSpaceChange = functools.partial(ignoreSpace, separator='' if flags.ignore_all_space else ' ')
f = compose2(ignoreAllSpaceOrSpaceChange, f)
for idx, lines in enumerate(filelines):
filelines[idx]= [f(line) for line in lines]
func = difflib.unified_diff if flags.unified_diff else difflib.context_diff
for diff in func(filelines[0], filelines[1], filepaths[0], filepaths[1],
n = flags.num_context_lines):
sys.stdout.write(to_string(diff))
exitCode = 1
return exitCode
def printDirVsFile(dir_path, file_path):
if os.path.getsize(file_path):
msg = "File %s is a directory while file %s is a regular file"
else:
msg = "File %s is a directory while file %s is a regular empty file"
sys.stdout.write(msg % (dir_path, file_path) + "\n")
def printFileVsDir(file_path, dir_path):
if os.path.getsize(file_path):
msg = "File %s is a regular file while file %s is a directory"
else:
msg = "File %s is a regular empty file while file %s is a directory"
sys.stdout.write(msg % (file_path, dir_path) + "\n")
def printOnlyIn(basedir, path, name):
sys.stdout.write("Only in %s: %s\n" % (os.path.join(basedir, path), name))
def compareDirTrees(flags, dir_trees, base_paths=["", ""]):
# Dirnames of the trees are not checked, it's caller's responsibility,
# as top-level dirnames are always different. Base paths are important
# for doing os.walk, but we don't put it into tree's dirname in order
# to speed up string comparison below and while sorting in getDirTree.
left_tree, right_tree = dir_trees[0], dir_trees[1]
left_base, right_base = base_paths[0], base_paths[1]
# Compare two files or report file vs. directory mismatch.
if left_tree[1] is None and right_tree[1] is None:
return compareTwoFiles(flags,
[os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0])])
if left_tree[1] is None and right_tree[1] is not None:
printFileVsDir(os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0]))
return 1
if left_tree[1] is not None and right_tree[1] is None:
printDirVsFile(os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0]))
return 1
# Compare two directories via recursive use of compareDirTrees.
exitCode = 0
left_names = [node[0] for node in left_tree[1]]
right_names = [node[0] for node in right_tree[1]]
l, r = 0, 0
while l < len(left_names) and r < len(right_names):
# Names are sorted in getDirTree, rely on that order.
if left_names[l] < right_names[r]:
exitCode = 1
printOnlyIn(left_base, left_tree[0], left_names[l])
l += 1
elif left_names[l] > right_names[r]:
exitCode = 1
printOnlyIn(right_base, right_tree[0], right_names[r])
r += 1
else:
exitCode |= compareDirTrees(flags,
[left_tree[1][l], right_tree[1][r]],
[os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0])])
l += 1
r += 1
# At least one of the trees has ended. Report names from the other tree.
while l < len(left_names):
exitCode = 1
printOnlyIn(left_base, left_tree[0], left_names[l])
l += 1
while r < len(right_names):
exitCode = 1
printOnlyIn(right_base, right_tree[0], right_names[r])
r += 1
return exitCode
def main(argv):
if sys.platform == "win32":
if hasattr(sys.stdout, 'buffer'):
# python 3
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, newline='\n')
else:
# python 2.7
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
args = argv[1:]
try:
opts, args = getopt.gnu_getopt(args, "wbuU:r", ["strip-trailing-cr"])
except getopt.GetoptError as err:
sys.stderr.write("Unsupported: 'diff': %s\n" % str(err))
sys.exit(1)
flags = DiffFlags()
filelines, filepaths, dir_trees = ([] for i in range(3))
for o, a in opts:
if o == "-w":
flags.ignore_all_space = True
elif o == "-b":
flags.ignore_space_change = True
elif o == "-u":
flags.unified_diff = True
elif o.startswith("-U"):
flags.unified_diff = True
try:
flags.num_context_lines = int(a)
if flags.num_context_lines < 0:
                    raise ValueError
except:
sys.stderr.write("Error: invalid '-U' argument: {}\n"
.format(a))
sys.exit(1)
elif o == "-r":
flags.recursive_diff = True
elif o == "--strip-trailing-cr":
flags.strip_trailing_cr = True
else:
assert False, "unhandled option"
if len(args) != 2:
sys.stderr.write("Error: missing or extra operand\n")
sys.exit(1)
exitCode = 0
try:
for file in args:
if file != "-" and not os.path.isabs(file):
file = os.path.realpath(os.path.join(os.getcwd(), file))
if flags.recursive_diff:
if file == "-":
sys.stderr.write("Error: cannot recursively compare '-'\n")
sys.exit(1)
dir_trees.append(getDirTree(file))
else:
filepaths.append(file)
if not flags.recursive_diff:
exitCode = compareTwoFiles(flags, filepaths)
else:
exitCode = compareDirTrees(flags, dir_trees)
except IOError as err:
sys.stderr.write("Error: 'diff' command failed, %s\n" % str(err))
exitCode = 1
sys.exit(exitCode)
if __name__ == "__main__":
main(sys.argv)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/builtin_commands/diff.py |
import itertools
import os
import platform
import re
import subprocess
import sys
import lit.util
from lit.llvm.subst import FindTool
from lit.llvm.subst import ToolSubst
lit_path_displayed = False
class LLVMConfig(object):
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
features = config.available_features
self.use_lit_shell = False
        # Tweak PATH on Win32 so we can decide whether to use bash.exe.
if sys.platform == 'win32':
# Seek necessary tools in directories and set to $PATH.
path = None
lit_tools_dir = getattr(config, 'lit_tools_dir', None)
required_tools = ['cmp.exe', 'grep.exe', 'sed.exe', 'diff.exe', 'echo.exe']
path = self.lit_config.getToolsPath(lit_tools_dir,
config.environment['PATH'],
required_tools)
if path is None:
path = self._find_git_windows_unix_tools(required_tools)
if path is not None:
self.with_environment('PATH', path, append_path=True)
# Many tools behave strangely if these environment variables aren't set.
self.with_system_environment(['SystemDrive', 'SystemRoot', 'TEMP', 'TMP'])
self.use_lit_shell = True
global lit_path_displayed
if not self.lit_config.quiet and lit_path_displayed is False:
self.lit_config.note("using lit tools: {}".format(path))
lit_path_displayed = True
# Choose between lit's internal shell pipeline runner and a real shell. If
# LIT_USE_INTERNAL_SHELL is in the environment, we use that as an override.
lit_shell_env = os.environ.get('LIT_USE_INTERNAL_SHELL')
if lit_shell_env:
self.use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
if not self.use_lit_shell:
features.add('shell')
# Running on Darwin OS
if platform.system() == 'Darwin':
# FIXME: lld uses the first, other projects use the second.
# We should standardize on the former.
features.add('system-linker-mach-o')
features.add('system-darwin')
elif platform.system() == 'Windows':
# For tests that require Windows to run.
features.add('system-windows')
elif platform.system() == 'Linux':
features.add('system-linux')
elif platform.system() in ['FreeBSD']:
features.add('system-freebsd')
elif platform.system() == 'NetBSD':
features.add('system-netbsd')
elif platform.system() == 'AIX':
features.add('system-aix')
elif platform.system() == 'SunOS':
features.add('system-solaris')
# Native compilation: host arch == default triple arch
        # Both of these values should probably be in every site config (e.g. as
        # part of the standard header), but currently they aren't.
host_triple = getattr(config, 'host_triple', None)
target_triple = getattr(config, 'target_triple', None)
if host_triple and host_triple == target_triple:
features.add('native')
# Sanitizers.
sanitizers = getattr(config, 'llvm_use_sanitizer', '')
sanitizers = frozenset(x.lower() for x in sanitizers.split(';'))
if 'address' in sanitizers:
features.add('asan')
if 'memory' in sanitizers or 'memorywithorigins' in sanitizers:
features.add('msan')
if 'undefined' in sanitizers:
features.add('ubsan')
have_zlib = getattr(config, 'have_zlib', None)
if have_zlib:
features.add('zlib')
# Check if we should run long running tests.
long_tests = lit_config.params.get('run_long_tests', None)
if lit.util.pythonize_bool(long_tests):
features.add('long_tests')
if target_triple:
if re.match(r'^x86_64.*-apple', target_triple):
features.add('x86_64-apple')
host_cxx = getattr(config, 'host_cxx', None)
if 'address' in sanitizers and self.get_clang_has_lsan(host_cxx, target_triple):
self.with_environment(
'ASAN_OPTIONS', 'detect_leaks=1', append_path=True)
if re.match(r'^x86_64.*-linux', target_triple):
features.add('x86_64-linux')
if re.match(r'^i.86.*', target_triple):
features.add('target-x86')
elif re.match(r'^x86_64.*', target_triple):
features.add('target-x86_64')
elif re.match(r'^aarch64.*', target_triple):
features.add('target-aarch64')
elif re.match(r'^arm.*', target_triple):
features.add('target-arm')
use_gmalloc = lit_config.params.get('use_gmalloc', None)
if lit.util.pythonize_bool(use_gmalloc):
# Allow use of an explicit path for gmalloc library.
# Will default to '/usr/lib/libgmalloc.dylib' if not set.
gmalloc_path_str = lit_config.params.get('gmalloc_path',
'/usr/lib/libgmalloc.dylib')
if gmalloc_path_str is not None:
self.with_environment(
'DYLD_INSERT_LIBRARIES', gmalloc_path_str)
def _find_git_windows_unix_tools(self, tools_needed):
assert(sys.platform == 'win32')
if sys.version_info.major >= 3:
import winreg
else:
import _winreg as winreg
# Search both the 64 and 32-bit hives, as well as HKLM + HKCU
masks = [0, winreg.KEY_WOW64_64KEY]
hives = [winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CURRENT_USER]
for mask, hive in itertools.product(masks, hives):
try:
with winreg.OpenKey(hive, r"SOFTWARE\GitForWindows", 0,
winreg.KEY_READ | mask) as key:
install_root, _ = winreg.QueryValueEx(key, 'InstallPath')
if not install_root:
continue
candidate_path = os.path.join(install_root, 'usr', 'bin')
if not lit.util.checkToolsPath(candidate_path, tools_needed):
continue
# We found it, stop enumerating.
return lit.util.to_string(candidate_path)
except:
continue
return None
def with_environment(self, variable, value, append_path=False):
if append_path:
# For paths, we should be able to take a list of them and process all
# of them.
paths_to_add = value
if lit.util.is_string(paths_to_add):
paths_to_add = [paths_to_add]
def norm(x):
return os.path.normcase(os.path.normpath(x))
current_paths = self.config.environment.get(variable, None)
if current_paths:
current_paths = current_paths.split(os.path.pathsep)
paths = [norm(p) for p in current_paths]
else:
paths = []
# If we are passed a list [a b c], then iterating this list forwards
# and adding each to the beginning would result in c b a. So we
# need to iterate in reverse to end up with the original ordering.
for p in reversed(paths_to_add):
# Move it to the front if it already exists, otherwise insert it at the
# beginning.
p = norm(p)
try:
paths.remove(p)
except ValueError:
pass
paths = [p] + paths
value = os.pathsep.join(paths)
self.config.environment[variable] = value
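    # Usage sketch (illustrative, not part of the original file). Despite the
    # name, append_path=True *prepends*: the new paths end up in front of the
    # existing value, in their given order, e.g. with PATH == '/usr/bin':
    #   self.with_environment('PATH', ['/a', '/b'], append_path=True)
    #   # config.environment['PATH'] -> '/a<sep>/b<sep>/usr/bin'
    # where <sep> is os.pathsep.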
def with_system_environment(self, variables, append_path=False):
if lit.util.is_string(variables):
variables = [variables]
for v in variables:
value = os.environ.get(v)
if value:
self.with_environment(v, value, append_path)
def clear_environment(self, variables):
for name in variables:
if name in self.config.environment:
del self.config.environment[name]
def get_process_output(self, command):
try:
cmd = subprocess.Popen(
command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=self.config.environment)
stdout, stderr = cmd.communicate()
stdout = lit.util.to_string(stdout)
stderr = lit.util.to_string(stderr)
return (stdout, stderr)
except OSError:
self.lit_config.fatal('Could not run process %s' % command)
def feature_config(self, features):
# Ask llvm-config about the specified feature.
arguments = [x for (x, _) in features]
config_path = os.path.join(self.config.llvm_tools_dir, 'llvm-config')
output, _ = self.get_process_output([config_path] + arguments)
lines = output.split('\n')
for (feature_line, (_, patterns)) in zip(lines, features):
# We should have either a callable or a dictionary. If it's a
# dictionary, grep each key against the output and use the value if
# it matches. If it's a callable, it does the entire translation.
if callable(patterns):
features_to_add = patterns(feature_line)
self.config.available_features.update(features_to_add)
else:
for (re_pattern, feature) in patterns.items():
if re.search(re_pattern, feature_line):
self.config.available_features.add(feature)
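    # Usage sketch (illustrative): feature_config() takes a list of
    # (llvm-config-flag, patterns) pairs, where patterns is either a dict of
    # {regex: feature-name} entries or a callable returning feature names,
    # e.g.:
    #   self.feature_config([
    #       ('--assertion-mode', {'ON': 'asserts'}),
    #       ('--build-mode', {'[Dd]ebug': 'debug'}),
    #   ])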
    # Note that when substituting %clang_cc1 we also fill in the include
    # directory of the builtin headers. Those are part of even a freestanding
    # environment, but Clang relies on the driver to locate them.
def get_clang_builtin_include_dir(self, clang):
# FIXME: Rather than just getting the version, we should have clang print
# out its resource dir here in an easy to scrape form.
clang_dir, _ = self.get_process_output(
[clang, '-print-file-name=include'])
if not clang_dir:
self.lit_config.fatal(
"Couldn't find the include dir for Clang ('%s')" % clang)
clang_dir = clang_dir.strip()
if sys.platform in ['win32'] and not self.use_lit_shell:
# Don't pass dosish path separator to msys bash.exe.
clang_dir = clang_dir.replace('\\', '/')
        # Ensure the result is an ASCII string across Python 2.5+ and Python 3.
return clang_dir
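    # Behavior sketch (illustrative): 'clang -print-file-name=include'
    # resolves to the builtin-header directory inside Clang's resource
    # directory, typically something like
    # '<prefix>/lib/clang/<version>/include'.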
# On macOS, LSan is only supported on clang versions 5 and higher
def get_clang_has_lsan(self, clang, triple):
if not clang:
self.lit_config.warning(
'config.host_cxx is unset but test suite is configured to use sanitizers.')
return False
clang_binary = clang.split()[0]
version_string, _ = self.get_process_output(
[clang_binary, '--version'])
        if 'clang' not in version_string:
self.lit_config.warning(
"compiler '%s' does not appear to be clang, " % clang_binary +
'but test suite is configured to use sanitizers.')
return False
if re.match(r'.*-linux', triple):
return True
if re.match(r'^x86_64.*-apple', triple):
            version_regex = re.search(r'version ([0-9]+)\.([0-9]+)\.([0-9]+)', version_string)
major_version_number = int(version_regex.group(1))
minor_version_number = int(version_regex.group(2))
patch_version_number = int(version_regex.group(3))
if ('Apple LLVM' in version_string) or ('Apple clang' in version_string):
# Apple clang doesn't yet support LSan
return False
else:
return major_version_number >= 5
return False
def make_itanium_abi_triple(self, triple):
m = re.match(r'(\w+)-(\w+)-(\w+)', triple)
if not m:
self.lit_config.fatal(
"Could not turn '%s' into Itanium ABI triple" % triple)
if m.group(3).lower() != 'windows':
# All non-windows triples use the Itanium ABI.
return triple
return m.group(1) + '-' + m.group(2) + '-' + m.group(3) + '-gnu'
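    # Example (illustrative): 'x86_64-pc-linux' is returned unchanged, while
    # 'x86_64-pc-windows' becomes 'x86_64-pc-windows-gnu'.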
def make_msabi_triple(self, triple):
m = re.match(r'(\w+)-(\w+)-(\w+)', triple)
if not m:
self.lit_config.fatal(
"Could not turn '%s' into MS ABI triple" % triple)
isa = m.group(1).lower()
vendor = m.group(2).lower()
os = m.group(3).lower()
if os == 'windows' and re.match(r'.*-msvc$', triple):
# If the OS is windows and environment is msvc, we're done.
return triple
if isa.startswith('x86') or isa == 'amd64' or re.match(r'i\d86', isa):
# For x86 ISAs, adjust the OS.
return isa + '-' + vendor + '-windows-msvc'
# -msvc is not supported for non-x86 targets; use a default.
return 'i686-pc-windows-msvc'
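    # Example (illustrative): 'x86_64-pc-windows-msvc' is returned unchanged,
    # 'x86_64-pc-linux' becomes 'x86_64-pc-windows-msvc', and a non-x86 ISA
    # such as 'aarch64-pc-linux' falls back to 'i686-pc-windows-msvc'.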
def add_tool_substitutions(self, tools, search_dirs=None):
if not search_dirs:
search_dirs = [self.config.llvm_tools_dir]
if lit.util.is_string(search_dirs):
search_dirs = [search_dirs]
tools = [x if isinstance(x, ToolSubst) else ToolSubst(x)
for x in tools]
search_dirs = os.pathsep.join(search_dirs)
substitutions = []
for tool in tools:
match = tool.resolve(self, search_dirs)
# Either no match occurred, or there was an unresolved match that
# is ignored.
if not match:
continue
subst_key, tool_pipe, command = match
# An unresolved match occurred that can't be ignored. Fail without
# adding any of the previously-discovered substitutions.
if not command:
return False
substitutions.append((subst_key, tool_pipe + command))
self.config.substitutions.extend(substitutions)
return True
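    # Usage sketch (illustrative): entries may be plain strings or ToolSubst
    # objects; plain strings are wrapped in a default ToolSubst, e.g.:
    #   self.add_tool_substitutions(['llc', 'opt'],
    #                               [self.config.llvm_tools_dir])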
def use_default_substitutions(self):
tool_patterns = [
ToolSubst('FileCheck', unresolved='fatal'),
# Handle these specially as they are strings searched for during testing.
ToolSubst(r'\| \bcount\b', command=FindTool(
'count'), verbatim=True, unresolved='fatal'),
ToolSubst(r'\| \bnot\b', command=FindTool('not'), verbatim=True, unresolved='fatal')]
self.config.substitutions.append(('%python', '"%s"' % (sys.executable)))
self.add_tool_substitutions(
tool_patterns, [self.config.llvm_tools_dir])
def use_llvm_tool(self, name, search_env=None, required=False, quiet=False):
"""Find the executable program 'name', optionally using the specified
environment variable as an override before searching the
configuration's PATH."""
# If the override is specified in the environment, use it without
# validation.
if search_env:
tool = self.config.environment.get(search_env)
if tool:
return tool
# Otherwise look in the path.
tool = lit.util.which(name, self.config.environment['PATH'])
if required and not tool:
message = "couldn't find '{}' program".format(name)
if search_env:
message = message + \
', try setting {} in your environment'.format(search_env)
self.lit_config.fatal(message)
if tool:
tool = os.path.normpath(tool)
if not self.lit_config.quiet and not quiet:
self.lit_config.note('using {}: {}'.format(name, tool))
return tool
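    # Usage sketch (illustrative; mirrors the CLANG lookup in use_clang below):
    #   clang = self.use_llvm_tool('clang', search_env='CLANG', required=True)
    # returns the value of $CLANG if set, otherwise the first 'clang' found
    # on the configured PATH, or aborts via fatal() when required.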
def use_clang(self, additional_tool_dirs=[], additional_flags=[], required=True):
"""Configure the test suite to be able to invoke clang.
Sets up some environment variables important to clang, locates a
        just-built or installed clang, and adds a set of standard
substitutions useful to any test suite that makes use of clang.
"""
# Clear some environment variables that might affect Clang.
#
        # This first set of vars is read by Clang, but shouldn't affect tests
# that aren't specifically looking for these features, or are required
# simply to run the tests at all.
#
# FIXME: Should we have a tool that enforces this?
# safe_env_vars = ('TMPDIR', 'TEMP', 'TMP', 'USERPROFILE', 'PWD',
# 'MACOSX_DEPLOYMENT_TARGET', 'IPHONEOS_DEPLOYMENT_TARGET',
# 'VCINSTALLDIR', 'VC100COMNTOOLS', 'VC90COMNTOOLS',
# 'VC80COMNTOOLS')
possibly_dangerous_env_vars = ['COMPILER_PATH', 'RC_DEBUG_OPTIONS',
'CINDEXTEST_PREAMBLE_FILE', 'LIBRARY_PATH',
'CPATH', 'C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH',
'OBJC_INCLUDE_PATH', 'OBJCPLUS_INCLUDE_PATH',
'LIBCLANG_TIMING', 'LIBCLANG_OBJTRACKING',
'LIBCLANG_LOGGING', 'LIBCLANG_BGPRIO_INDEX',
'LIBCLANG_BGPRIO_EDIT', 'LIBCLANG_NOTHREADS',
'LIBCLANG_RESOURCE_USAGE',
'LIBCLANG_CODE_COMPLETION_LOGGING']
# Clang/Win32 may refer to %INCLUDE%. vsvarsall.bat sets it.
if platform.system() != 'Windows':
possibly_dangerous_env_vars.append('INCLUDE')
self.clear_environment(possibly_dangerous_env_vars)
# Tweak the PATH to include the tools dir and the scripts dir.
        # Put Clang first to prevent LLVM from overriding out-of-tree clang builds.
exe_dir_props = [self.config.name.lower() + '_tools_dir', 'clang_tools_dir', 'llvm_tools_dir']
paths = [getattr(self.config, pp) for pp in exe_dir_props
if getattr(self.config, pp, None)]
paths = additional_tool_dirs + paths
self.with_environment('PATH', paths, append_path=True)
lib_dir_props = [self.config.name.lower() + '_libs_dir', 'clang_libs_dir', 'llvm_shlib_dir', 'llvm_libs_dir']
paths = [getattr(self.config, pp) for pp in lib_dir_props
if getattr(self.config, pp, None)]
self.with_environment('LD_LIBRARY_PATH', paths, append_path=True)
shl = getattr(self.config, 'llvm_shlib_dir', None)
pext = getattr(self.config, 'llvm_plugin_ext', None)
if shl:
self.config.substitutions.append(('%llvmshlibdir', shl))
if pext:
self.config.substitutions.append(('%pluginext', pext))
# Discover the 'clang' and 'clangcc' to use.
self.config.clang = self.use_llvm_tool(
'clang', search_env='CLANG', required=required)
if self.config.clang:
self.config.available_features.add('clang')
builtin_include_dir = self.get_clang_builtin_include_dir(self.config.clang)
tool_substitutions = [
ToolSubst('%clang', command=self.config.clang, extra_args=additional_flags),
ToolSubst('%clang_analyze_cc1', command='%clang_cc1', extra_args=['-analyze', '%analyze', '-setup-static-analyzer']+additional_flags),
ToolSubst('%clang_cc1', command=self.config.clang, extra_args=['-cc1', '-internal-isystem', builtin_include_dir, '-nostdsysteminc']+additional_flags),
ToolSubst('%clang_cpp', command=self.config.clang, extra_args=['--driver-mode=cpp']+additional_flags),
ToolSubst('%clang_cl', command=self.config.clang, extra_args=['--driver-mode=cl']+additional_flags),
ToolSubst('%clangxx', command=self.config.clang, extra_args=['--driver-mode=g++']+additional_flags),
]
self.add_tool_substitutions(tool_substitutions)
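            # For reference (illustrative, not part of the original file):
            # with the substitutions registered above, a test line such as
            #   %clang_cc1 -emit-llvm -o - %s
            # expands to the located clang with '-cc1', the builtin include
            # directory, and '-nostdsysteminc' filled in.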
self.config.substitutions.append(
('%resource_dir', builtin_include_dir))
self.config.substitutions.append(('%itanium_abi_triple',
self.make_itanium_abi_triple(self.config.target_triple)))
self.config.substitutions.append(('%ms_abi_triple',
self.make_msabi_triple(self.config.target_triple)))
# The host triple might not be set, at least if we're compiling clang from
# an already installed llvm.
if self.config.host_triple and self.config.host_triple != '@LLVM_HOST_TRIPLE@':
self.config.substitutions.append(('%target_itanium_abi_host_triple',
'--target=%s' % self.make_itanium_abi_triple(self.config.host_triple)))
else:
self.config.substitutions.append(
('%target_itanium_abi_host_triple', ''))
# FIXME: Find nicer way to prohibit this.
self.config.substitutions.append(
(' clang ', """\"*** Do not use 'clang' in tests, use '%clang'. ***\""""))
self.config.substitutions.append(
(r' clang\+\+ ', """\"*** Do not use 'clang++' in tests, use '%clangxx'. ***\""""))
self.config.substitutions.append(
(' clang-cc ',
"""\"*** Do not use 'clang-cc' in tests, use '%clang_cc1'. ***\""""))
self.config.substitutions.append(
(' clang-cl ',
"""\"*** Do not use 'clang-cl' in tests, use '%clang_cl'. ***\""""))
self.config.substitutions.append(
(' clang -cc1 -analyze ',
"""\"*** Do not use 'clang -cc1 -analyze' in tests, use '%clang_analyze_cc1'. ***\""""))
self.config.substitutions.append(
(' clang -cc1 ',
"""\"*** Do not use 'clang -cc1' in tests, use '%clang_cc1'. ***\""""))
self.config.substitutions.append(
(' %clang-cc1 ',
"""\"*** invalid substitution, use '%clang_cc1'. ***\""""))
self.config.substitutions.append(
(' %clang-cpp ',
"""\"*** invalid substitution, use '%clang_cpp'. ***\""""))
self.config.substitutions.append(
(' %clang-cl ',
"""\"*** invalid substitution, use '%clang_cl'. ***\""""))
def use_lld(self, additional_tool_dirs=[], required=True):
"""Configure the test suite to be able to invoke lld.
Sets up some environment variables important to lld, locates a
        just-built or installed lld, and adds a set of standard
substitutions useful to any test suite that makes use of lld.
"""
# Tweak the PATH to include the tools dir and the scripts dir.
exe_dir_props = [self.config.name.lower() + '_tools_dir', 'lld_tools_dir', 'llvm_tools_dir']
paths = [getattr(self.config, pp) for pp in exe_dir_props
if getattr(self.config, pp, None)]
paths = additional_tool_dirs + paths
self.with_environment('PATH', paths, append_path=True)
lib_dir_props = [self.config.name.lower() + '_libs_dir', 'lld_libs_dir', 'llvm_libs_dir']
paths = [getattr(self.config, pp) for pp in lib_dir_props
if getattr(self.config, pp, None)]
self.with_environment('LD_LIBRARY_PATH', paths, append_path=True)
        # Discover the LLD executables to use.
ld_lld = self.use_llvm_tool('ld.lld', required=required)
lld_link = self.use_llvm_tool('lld-link', required=required)
ld64_lld = self.use_llvm_tool('ld64.lld', required=required)
wasm_ld = self.use_llvm_tool('wasm-ld', required=required)
was_found = ld_lld and lld_link and ld64_lld and wasm_ld
tool_substitutions = []
if ld_lld:
tool_substitutions.append(ToolSubst(r'ld\.lld', command=ld_lld))
if lld_link:
tool_substitutions.append(ToolSubst('lld-link', command=lld_link))
if ld64_lld:
tool_substitutions.append(ToolSubst(r'ld64\.lld', command=ld64_lld))
if wasm_ld:
tool_substitutions.append(ToolSubst('wasm-ld', command=wasm_ld))
self.add_tool_substitutions(tool_substitutions)
return was_found
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/llvm/config.py |
from lit.llvm import config
llvm_config = None
def initialize(lit_config, test_config):
global llvm_config
llvm_config = config.LLVMConfig(lit_config, test_config)
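# Typical usage from a suite's lit.cfg (sketch): initialize first, then use
# the module-level handle, e.g.:
#   import lit.llvm
#   lit.llvm.initialize(lit_config, config)
#   from lit.llvm import llvm_config
#   llvm_config.use_default_substitutions()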
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/llvm/__init__.py |
import os
import re
import lit.util
# Recognizes a substitution pattern previously built by ToolSubst (with an
# optional pipe prefix) and extracts the tool name between the \b markers.
expr = re.compile(r"^(\\)?((\| )?)\W+b(\S+)\\b\W*$")
# Splits a key into a non-word introducer and the \b-wrapped word.
wordifier = re.compile(r"(\W*)(\b[^\b]+\b)")
class FindTool(object):
def __init__(self, name):
self.name = name
def resolve(self, config, dirs):
        # Check for a user explicitly overriding a tool. This allows:
# llvm-lit -D llc="llc -enable-misched -verify-machineinstrs"
command = config.lit_config.params.get(self.name)
if command is None:
        # Then check our search paths.
command = lit.util.which(self.name, dirs)
if not command:
return None
if self.name == 'llc' and os.environ.get('LLVM_ENABLE_MACHINE_VERIFIER') == '1':
command += ' -verify-machineinstrs'
elif self.name == 'llvm-go':
exe = getattr(config.config, 'go_executable', None)
if exe:
command += ' go=' + exe
return command
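# Example (illustrative): invoking 'llvm-lit -D llc="llc -enable-misched"'
# makes FindTool('llc').resolve(...) return the override string verbatim,
# bypassing the search of the given directories.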
class ToolSubst(object):
"""String-like class used to build regex substitution patterns for llvm
tools.
Handles things like adding word-boundary patterns, and filtering
characters from the beginning an end of a tool name
"""
def __init__(self, key, command=None, pre=r'.-^/\<', post='-.', verbatim=False,
unresolved='warn', extra_args=None):
"""Construct a ToolSubst.
key: The text which is to be substituted.
command: The command to substitute when the key is matched. By default,
this will treat `key` as a tool name and search for it. If it is
            a string, it is interpreted as an exact path. If it is an instance of
FindTool, the specified tool name is searched for on disk.
pre: If specified, the substitution will not find matches where
the character immediately preceding the word-boundary that begins
`key` is any of the characters in the string `pre`.
post: If specified, the substitution will not find matches where
the character immediately after the word-boundary that ends `key`
is any of the characters specified in the string `post`.
verbatim: If True, `key` is an exact regex that is passed to the
            underlying substitution.
unresolved: Action to take if the tool substitution cannot be
resolved. Valid values:
            'warn' - Log a warning but add the substitution anyway.
            'fatal' - Exit the test suite and log a fatal error.
            'break' - Don't add any of the substitutions from the current
                group, and return a value indicating a failure.
            'ignore' - Don't add the substitution, and don't log an error.
extra_args: If specified, represents a list of arguments that will be
appended to the tool's substitution.
explicit_path: If specified, the exact path will be used as a substitution.
Otherwise, the tool will be searched for as if by calling which(tool)
"""
self.unresolved = unresolved
self.extra_args = extra_args
self.key = key
self.command = command if command is not None else FindTool(key)
self.was_resolved = False
if verbatim:
self.regex = key
return
def not_in(chars, where=''):
if not chars:
return ''
pattern_str = '|'.join(re.escape(x) for x in chars)
return r'(?{}!({}))'.format(where, pattern_str)
def wordify(word):
match = wordifier.match(word)
introducer = match.group(1)
word = match.group(2)
return introducer + r'\b' + word + r'\b'
self.regex = not_in(pre, '<') + wordify(key) + not_in(post)
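    # Illustrative sketch: with the default pre/post, ToolSubst('FileCheck')
    # builds a regex of roughly the form
    #   (?<!(\.|-|\^|/|\\|<))\bFileCheck\b(?!(-|\.))
    # so the tool name only matches when not adjacent to those characters.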
def resolve(self, config, search_dirs):
# Extract the tool name from the pattern. This relies on the tool
# name being surrounded by \b word match operators. If the
# pattern starts with "| ", include it in the string to be
# substituted.
tool_match = expr.match(self.regex)
if not tool_match:
return None
tool_pipe = tool_match.group(2)
tool_name = tool_match.group(4)
if isinstance(self.command, FindTool):
command_str = self.command.resolve(config, search_dirs)
else:
command_str = str(self.command)
if command_str:
if self.extra_args:
command_str = ' '.join([command_str] + self.extra_args)
else:
if self.unresolved == 'warn':
# Warn, but still provide a substitution.
config.lit_config.note(
'Did not find ' + tool_name + ' in %s' % search_dirs)
command_str = os.path.join(
config.config.llvm_tools_dir, tool_name)
elif self.unresolved == 'fatal':
                # The function won't even return in this case; it leads to
                # sys.exit.
config.lit_config.fatal(
'Did not find ' + tool_name + ' in %s' % search_dirs)
elif self.unresolved == 'break':
# By returning a valid result with an empty command, the
# caller treats this as a failure.
pass
elif self.unresolved == 'ignore':
# By returning None, the caller just assumes there was no
# match in the first place.
return None
else:
                raise ValueError('Unexpected value for ToolSubst.unresolved')
if command_str:
self.was_resolved = True
return (self.regex, tool_pipe, command_str)
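# Illustrative sketch: on success resolve() returns a (regex, pipe_prefix,
# command) triple, e.g. roughly ('...FileCheck...', '', '<path>/FileCheck');
# with unresolved='break' the command element is empty (the caller treats it
# as failure), and with unresolved='ignore' the method returns None.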
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/lit/llvm/subst.py |
# Check cases where LIT_OPTS has no effect.
#
# RUN: %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
# RUN: env LIT_OPTS= %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
# RUN: env LIT_OPTS=-s %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
# Check that LIT_OPTS can override command-line options.
#
# RUN: env LIT_OPTS=-a \
# RUN: %{lit} -j 1 -s %{inputs}/lit-opts \
# RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR= %s
# Check that LIT_OPTS understands multiple options with arbitrary spacing.
#
# RUN: env LIT_OPTS='-a -v -Dvar=foobar' \
# RUN: %{lit} -j 1 -s %{inputs}/lit-opts \
# RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR=foobar %s
# Check that LIT_OPTS parses shell-like quotes and escapes.
#
# RUN: env LIT_OPTS='-a -v -Dvar="foo bar"\ baz' \
# RUN: %{lit} -j 1 -s %{inputs}/lit-opts \
# RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR="foo bar baz" %s
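# For reference (illustrative): the value above is tokenized shell-style, so
# -Dvar="foo bar"\ baz collapses into the single argument -Dvar=foo bar baz,
# which is what the VAR substitution in the last FileCheck line expects.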
# CHECK: Testing: 1 tests
# CHECK-NOT: PASS
# CHECK: Passed: 1
# SHOW-ALL: Testing: 1 tests
# SHOW-ALL: PASS: lit-opts :: test.txt (1 of 1)
# SHOW-ALL: {{^}}[[VAR]]
# SHOW-ALL-NOT: PASS
# SHOW-ALL: Passed: 1
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/lit-opts.py |
# Check that we route argv[0] as it was written, instead of the resolved
# path. This is important for some tools, in particular '[' which at least on OS
# X only recognizes that it is in '['-mode when its argv[0] is exactly
# '['. Otherwise it will refuse to accept the trailing closing bracket.
#
# This test is not supported on AIX since `[` is only available as a shell builtin
# and is not installed under PATH by default.
# UNSUPPORTED: system-aix
#
# RUN: %{lit} -j 1 -v %{inputs}/shtest-format-argv0 | FileCheck %s
# CHECK: -- Testing:
# CHECK: PASS: shtest-format-argv0 :: argv0.txt
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shtest-format-argv0.py |
# Test the boolean expression parser
# used for REQUIRES and UNSUPPORTED and XFAIL
# RUN: %{python} -m lit.BooleanExpression
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/boolean-parsing.py |
# Check that we do not crash if a parallelism group is set to None. Permits
# usage of the following pattern.
#
# [lit.common.cfg]
# lit_config.parallelism_groups['my_group'] = None
# if <condition>:
# lit_config.parallelism_groups['my_group'] = 3
#
# [project/lit.cfg]
# config.parallelism_group = 'my_group'
#
# RUN: %{lit} -j2 %{inputs}/parallelism-groups | FileCheck %s
# CHECK: -- Testing: 2 tests, 2 workers --
# CHECK-DAG: PASS: parallelism-groups :: test1.txt
# CHECK-DAG: PASS: parallelism-groups :: test2.txt
# CHECK: Passed: 2
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/parallelism-groups.py |
# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out
# RUN: FileCheck < %t.results.out %s
# RUN: rm %t.results.out
# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": {{[0-9.]+}},
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "value0": 1,
# CHECK-NEXT: "value1": 2.3456
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini",
# CHECK-NEXT: "output": "Test passed."
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/test-output-micro.py |
# REQUIRES: lit-max-individual-test-time
# llvm.org/PR33944
# UNSUPPORTED: system-windows
###############################################################################
# Check tests can hit the timeout when set
###############################################################################
# Test per test timeout using external shell
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: -j 1 -v --debug --timeout 1 --param external=1 > %t.extsh.out 2> %t.extsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.extsh.out %s
# RUN: FileCheck --check-prefix=CHECK-EXTSH-ERR < %t.extsh.err %s
#
# CHECK-EXTSH-ERR: Using external shell
# Test per test timeout using internal shell
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: -j 1 -v --debug --timeout 1 --param external=0 > %t.intsh.out 2> %t.intsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.intsh.out %s
# RUN: FileCheck --check-prefix=CHECK-INTSH-OUT < %t.intsh.out %s
# RUN: FileCheck --check-prefix=CHECK-INTSH-ERR < %t.intsh.err %s
# CHECK-INTSH-OUT: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-INTSH-OUT: command reached timeout: True
# CHECK-INTSH-ERR: Using internal shell
# Test per test timeout set via a config file rather than on the command line
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: -j 1 -v --debug --param external=0 \
# RUN: --param set_timeout=1 > %t.cfgset.out 2> %t.cfgset.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.cfgset.out %s
# RUN: FileCheck --check-prefix=CHECK-CFGSET-ERR < %t.cfgset.err %s
#
# CHECK-CFGSET-ERR: Using internal shell
# CHECK-OUT-COMMON: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-OUT-COMMON: Timeout: Reached timeout of 1 seconds
# CHECK-OUT-COMMON: Timed Out: 1
###############################################################################
# Check tests can complete with a timeout set
#
# `short.py` should execute quickly so we shouldn't wait anywhere near the
# 3600 second timeout.
###############################################################################
# Test per test timeout using external shell
# RUN: %{lit} \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: -j 1 -v --debug --timeout 3600 --param external=1 > %t.pass.extsh.out 2> %t.pass.extsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON-SHORT < %t.pass.extsh.out %s
# RUN: FileCheck --check-prefix=CHECK-EXTSH-ERR < %t.pass.extsh.err %s
# Test per test timeout using internal shell
# RUN: %{lit} \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: -j 1 -v --debug --timeout 3600 --param external=0 > %t.pass.intsh.out 2> %t.pass.intsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON-SHORT < %t.pass.intsh.out %s
# RUN: FileCheck --check-prefix=CHECK-INTSH-ERR < %t.pass.intsh.err %s
# CHECK-OUT-COMMON-SHORT: PASS: per_test_timeout :: short.py
# CHECK-OUT-COMMON-SHORT: Passed: 1
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: %{lit} \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: -j 1 -v --debug --param external=0 \
# RUN: --param set_timeout=1 --timeout=3600 > %t.pass.cmdover.out 2> %t.pass.cmdover.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON-SHORT < %t.pass.cmdover.out %s
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-ERR < %t.pass.cmdover.err %s
# CHECK-CMDLINE-OVERRIDE-ERR: Forcing timeout to be 3600 seconds
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shtest-timeout.py |
# Check the behavior of the ALLOW_RETRIES keyword.
# This test uses a counter file that persists across retries so that the test
# fails at first and only succeeds the fourth time it is retried.
#
# RUN: rm -f %t.counter
# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
# CHECK-TEST1: Passed With Retry: 1
# Test that a per-file ALLOW_RETRIES overrides the config-wide test_retry_attempts property, if any.
#
# RUN: rm -f %t.counter
# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
# CHECK-TEST2: Passed With Retry: 1
# This test does not succeed within the allowed retry limit
#
# RUN: not %{lit} -j 1 %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
# CHECK-TEST3: Failed Tests (1):
# CHECK-TEST3: allow-retries :: does-not-succeed-within-limit.py
# This test should be UNRESOLVED since it has more than one ALLOW_RETRIES
# lines, and that is not allowed.
#
# RUN: not %{lit} -j 1 %{inputs}/allow-retries/more-than-one-allow-retries-lines.py | FileCheck --check-prefix=CHECK-TEST4 %s
# CHECK-TEST4: Unresolved Tests (1):
# CHECK-TEST4: allow-retries :: more-than-one-allow-retries-lines.py
# This test does not provide a valid integer to the ALLOW_RETRIES keyword.
# It should be unresolved.
#
# RUN: not %{lit} -j 1 %{inputs}/allow-retries/not-a-valid-integer.py | FileCheck --check-prefix=CHECK-TEST5 %s
# CHECK-TEST5: Unresolved Tests (1):
# CHECK-TEST5: allow-retries :: not-a-valid-integer.py
# This test checks that the config-wide test_retry_attempts property is used
# when no ALLOW_RETRIES keyword is present.
#
# RUN: rm -f %t.counter
# RUN: %{lit} -j 1 %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
# CHECK-TEST6: Passed With Retry: 1
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/allow-retries.py |
# Just run the ShUtil unit tests.
#
# RUN: %{python} -m lit.ShUtil
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shell-parsing.py |
# Check the basic discovery process, including a sub-suite.
#
# RUN: %{lit} %{inputs}/discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
#
# CHECK-BASIC-ERR: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-BASIC-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}'
# CHECK-BASIC-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}'
#
# CHECK-BASIC-OUT: -- Test Suites --
# CHECK-BASIC-OUT: sub-suite - 2 tests
# CHECK-BASIC-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-BASIC-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-BASIC-OUT: top-level-suite - 3 tests
# CHECK-BASIC-OUT: Source Root: {{.*[/\\]discovery$}}
# CHECK-BASIC-OUT: Exec Root : {{.*[/\\]discovery$}}
# CHECK-BASIC-OUT: Available Features: feature1 feature2
# CHECK-BASIC-OUT: Available Substitutions: %key1 => value1
# CHECK-BASIC-OUT: %key2 => value2
#
# CHECK-BASIC-OUT: -- Available Tests --
# CHECK-BASIC-OUT: sub-suite :: test-one
# CHECK-BASIC-OUT: sub-suite :: test-two
# CHECK-BASIC-OUT: top-level-suite :: subdir/test-three
# CHECK-BASIC-OUT: top-level-suite :: test-one
# CHECK-BASIC-OUT: top-level-suite :: test-two
# Check discovery when providing the special builtin 'config_map'
# RUN: %{python} %{inputs}/config-map-discovery/driver.py \
# RUN: %{inputs}/config-map-discovery/main-config/lit.cfg \
# RUN: %{inputs}/config-map-discovery/lit.alt.cfg \
# RUN: --workers=1 --debug --show-tests --show-suites > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-CONFIG-MAP-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-CONFIG-MAP-ERR < %t.err %s
# CHECK-CONFIG-MAP-OUT-NOT: ERROR: lit.cfg invoked
# CHECK-CONFIG-MAP-OUT: -- Test Suites --
# CHECK-CONFIG-MAP-OUT: config-map - 2 tests
# CHECK-CONFIG-MAP-OUT: Source Root: {{.*[/\\]config-map-discovery[/\\]tests}}
# CHECK-CONFIG-MAP-OUT: Exec Root : {{.*[/\\]tests[/\\]Inputs[/\\]config-map-discovery}}
# CHECK-CONFIG-MAP-OUT: -- Available Tests --
# CHECK-CONFIG-MAP-OUT-NOT: invalid-test.txt
# CHECK-CONFIG-MAP-OUT: config-map :: test1.txt
# CHECK-CONFIG-MAP-OUT: config-map :: test2.txt
# CHECK-CONFIG-MAP-ERR: loading suite config '{{.*}}lit.alt.cfg'
# CHECK-CONFIG-MAP-ERR: loaded config '{{.*}}lit.alt.cfg'
# CHECK-CONFIG-MAP-ERR: resolved input '{{.*(/|\\\\)config-map-discovery(/|\\\\)main-config}}' to 'config-map'::()
# Check discovery when tests are named directly.
#
# RUN: %{lit} \
# RUN: %{inputs}/discovery/subdir/test-three.py \
# RUN: %{inputs}/discovery/subsuite/test-one.txt \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-DIRECT-TEST < %t.out %s
#
# CHECK-DIRECT-TEST: -- Available Tests --
# CHECK-DIRECT-TEST: sub-suite :: test-one
# CHECK-DIRECT-TEST: top-level-suite :: subdir/test-three
# Check discovery when config files end in .py
# RUN: %{lit} %{inputs}/py-config-discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-PYCONFIG-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-PYCONFIG-ERR < %t.err %s
#
# CHECK-PYCONFIG-ERR: loading suite config '{{.*(/|\\\\)py-config-discovery(/|\\\\)lit.site.cfg.py}}'
# CHECK-PYCONFIG-ERR: load_config from '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-PYCONFIG-ERR: loaded config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-PYCONFIG-ERR: loaded config '{{.*(/|\\\\)py-config-discovery(/|\\\\)lit.site.cfg.py}}'
# CHECK-PYCONFIG-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}'
# CHECK-PYCONFIG-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}'
#
# CHECK-PYCONFIG-OUT: -- Test Suites --
# CHECK-PYCONFIG-OUT: sub-suite - 2 tests
# CHECK-PYCONFIG-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-PYCONFIG-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-PYCONFIG-OUT: top-level-suite - 3 tests
# CHECK-PYCONFIG-OUT: Source Root: {{.*[/\\]discovery$}}
# CHECK-PYCONFIG-OUT: Exec Root : {{.*[/\\]py-config-discovery$}}
#
# CHECK-PYCONFIG-OUT: -- Available Tests --
# CHECK-PYCONFIG-OUT: sub-suite :: test-one
# CHECK-PYCONFIG-OUT: sub-suite :: test-two
# CHECK-PYCONFIG-OUT: top-level-suite :: subdir/test-three
# CHECK-PYCONFIG-OUT: top-level-suite :: test-one
# CHECK-PYCONFIG-OUT: top-level-suite :: test-two
# Check discovery when using an exec path.
#
# RUN: %{lit} %{inputs}/exec-discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s
#
# CHECK-ASEXEC-ERR: loading suite config '{{.*(/|\\\\)exec-discovery(/|\\\\)lit.site.cfg}}'
# CHECK-ASEXEC-ERR: load_config from '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-ASEXEC-ERR: loaded config '{{.*(/|\\\\)discovery(/|\\\\)lit.cfg}}'
# CHECK-ASEXEC-ERR: loaded config '{{.*(/|\\\\)exec-discovery(/|\\\\)lit.site.cfg}}'
# CHECK-ASEXEC-ERR-DAG: loading suite config '{{.*(/|\\\\)discovery(/|\\\\)subsuite(/|\\\\)lit.cfg}}'
# CHECK-ASEXEC-ERR-DAG: loading local config '{{.*(/|\\\\)discovery(/|\\\\)subdir(/|\\\\)lit.local.cfg}}'
#
# CHECK-ASEXEC-OUT: -- Test Suites --
# CHECK-ASEXEC-OUT: sub-suite - 2 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*[/\\]discovery[/\\]subsuite$}}
# CHECK-ASEXEC-OUT: top-level-suite - 3 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*[/\\]discovery$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*[/\\]exec-discovery$}}
#
# CHECK-ASEXEC-OUT: -- Available Tests --
# CHECK-ASEXEC-OUT: sub-suite :: test-one
# CHECK-ASEXEC-OUT: sub-suite :: test-two
# CHECK-ASEXEC-OUT: top-level-suite :: subdir/test-three
# CHECK-ASEXEC-OUT: top-level-suite :: test-one
# CHECK-ASEXEC-OUT: top-level-suite :: test-two
# Check discovery when tests are named directly.
#
# FIXME: Note that using a path into a subsuite doesn't work correctly here.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery/subdir/test-three.py \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-DIRECT-TEST < %t.out %s
#
# CHECK-ASEXEC-DIRECT-TEST: -- Available Tests --
# CHECK-ASEXEC-DIRECT-TEST: top-level-suite :: subdir/test-three
# Check an error is emitted when the directly named test would not be run
# indirectly (e.g. when the directory containing the test is specified).
#
# RUN: not %{lit} \
# RUN: %{inputs}/discovery/test.not-txt -j 1 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-ERROR-INDIRECT-RUN-CHECK < %t.err %s
#
# CHECK-ERROR-INDIRECT-RUN-CHECK: error: 'top-level-suite :: test.not-txt' would not be run indirectly
# Check that no error is emitted with --no-indirectly-run-check.
#
# RUN: %{lit} \
# RUN: %{inputs}/discovery/test.not-txt -j 1 --no-indirectly-run-check
# Check that we don't recurse infinitely when loading a site-specific test
# suite located inside the test source root.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery-in-tree/obj/ \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# Try it again after cd'ing into the test suite using a short relative path.
#
# RUN: cd %{inputs}/exec-discovery-in-tree/obj/
# RUN: %{lit} . \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests
# CHECK-ASEXEC-INTREE-NEXT: Source Root: {{.*[/\\]exec-discovery-in-tree$}}
# CHECK-ASEXEC-INTREE-NEXT: Exec Root : {{.*[/\\]exec-discovery-in-tree[/\\]obj$}}
# CHECK-ASEXEC-INTREE-NEXT: Available Features:
# CHECK-ASEXEC-INTREE-NEXT: Available Substitutions:
# CHECK-ASEXEC-INTREE-NEXT: -- Available Tests --
# CHECK-ASEXEC-INTREE-NEXT: exec-discovery-in-tree-suite :: test-one
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/discovery.py |
# Check the not command
#
# RUN: not %{lit} -j 1 -a -v %{inputs}/shtest-not \
# RUN: | FileCheck -match-full-lines %s
#
# END.
# Make sure the 'not' and 'env' commands are included in the printed commands.
# CHECK: -- Testing: 13 tests{{.*}}
# CHECK: FAIL: shtest-not :: not-args-last-is-crash.txt {{.*}}
# CHECK: $ "not" "--crash"
# CHECK: Error: 'not' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-args-nested-none.txt {{.*}}
# CHECK: $ "not" "not" "not"
# CHECK: Error: 'not' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-args-none.txt {{.*}}
# CHECK: $ "not"
# CHECK: Error: 'not' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-calls-cd.txt {{.*}}
# CHECK: $ "not" "not" "cd" "foobar"
# CHECK: $ "not" "--crash" "cd" "foobar"
# CHECK: Error: 'not --crash' cannot call 'cd'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-calls-colon.txt {{.*}}
# CHECK: $ "not" "not" ":" "foobar"
# CHECK: $ "not" "--crash" ":"
# CHECK: Error: 'not --crash' cannot call ':'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-calls-diff-with-crash.txt {{.*}}
# CHECK: $ "not" "--crash" "diff" "-u" {{.*}}
# CHECK-NOT: "$"
# CHECK-NOT: {{[Ee]rror}}
# CHECK: error: command failed with exit status: {{.*}}
# CHECK-NOT: {{[Ee]rror}}
# CHECK-NOT: "$"
# CHECK: FAIL: shtest-not :: not-calls-diff.txt {{.*}}
# CHECK: $ "not" "diff" {{.*}}
# CHECK: $ "not" "not" "not" "diff" {{.*}}
# CHECK: $ "not" "not" "not" "not" "not" "diff" {{.*}}
# CHECK: $ "diff" {{.*}}
# CHECK: $ "not" "not" "diff" {{.*}}
# CHECK: $ "not" "not" "not" "not" "diff" {{.*}}
# CHECK: $ "not" "diff" {{.*}}
# CHECK-NOT: "$"
# CHECK: FAIL: shtest-not :: not-calls-echo.txt {{.*}}
# CHECK: $ "not" "not" "echo" "hello" "world"
# CHECK: $ "not" "--crash" "echo" "hello" "world"
# CHECK: Error: 'not --crash' cannot call 'echo'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-calls-env-builtin.txt {{.*}}
# CHECK: $ "not" "--crash" "env" "-u" "FOO" "BAR=3" "rm" "{{.*}}.no-such-file"
# CHECK: Error: 'env' cannot call 'rm'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-calls-export.txt {{.*}}
# CHECK: $ "not" "not" "export" "FOO=1"
# CHECK: $ "not" "--crash" "export" "BAZ=3"
# CHECK: Error: 'not --crash' cannot call 'export'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: PASS: shtest-not :: not-calls-external.txt {{.*}}
# CHECK: $ "not" "{{[^"]*}}" "fail.py"
# CHECK: $ "not" "not" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "not" "not" "{{[^"]*}}" "fail.py"
# CHECK: $ "not" "not" "not" "not" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "not" "--crash" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "not" "--crash" "{{[^"]*}}" "fail.py"
# CHECK: $ "not" "not" "--crash" "not" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "not" "--crash" "not" "{{[^"]*}}" "fail.py"
# CHECK: $ "env" "not" "{{[^"]*}}" "fail.py"
# CHECK: $ "not" "env" "{{[^"]*}}" "fail.py"
# CHECK: $ "env" "FOO=1" "not" "{{[^"]*}}" "fail.py"
# CHECK: $ "not" "env" "FOO=1" "BAR=1" "{{[^"]*}}" "fail.py"
# CHECK: $ "env" "FOO=1" "BAR=1" "not" "env" "-u" "FOO" "BAR=2" "{{[^"]*}}" "fail.py"
# CHECK: $ "not" "env" "FOO=1" "BAR=1" "not" "env" "-u" "FOO" "-u" "BAR" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "not" "env" "FOO=1" "env" "FOO=2" "BAR=1" "{{[^"]*}}" "pass.py"
# CHECK: $ "env" "FOO=1" "-u" "BAR" "env" "-u" "FOO" "BAR=1" "not" "not" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "env" "FOO=1" "BAR=1" "env" "FOO=2" "BAR=2" "not" "--crash" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "env" "FOO=1" "BAR=1" "not" "--crash" "not" "{{[^"]*}}" "pass.py"
# CHECK: $ "not" "not" "--crash" "env" "-u" "BAR" "not" "env" "-u" "FOO" "BAR=1" "{{[^"]*}}" "pass.py"
# CHECK: FAIL: shtest-not :: not-calls-mkdir.txt {{.*}}
# CHECK: $ "not" "mkdir" {{.*}}
# CHECK: $ "not" "--crash" "mkdir" "foobar"
# CHECK: Error: 'not --crash' cannot call 'mkdir'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-not :: not-calls-rm.txt {{.*}}
# CHECK: $ "not" "rm" {{.*}}
# CHECK: $ "not" "--crash" "rm" "foobar"
# CHECK: Error: 'not --crash' cannot call 'rm'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: Passed: 1
# CHECK: Failed: 12
# CHECK-NOT: {{.}}
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shtest-not.py |
# Check for correct error message when discovery of tests fails.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-discovery-failed > %t.cmd.out
# RUN: FileCheck < %t.cmd.out %s
# CHECK: -- Testing:
# CHECK: Failed Tests (1):
# CHECK: googletest-discovery-failed :: subdir/OneTest.py/failed_to_discover_tests_from_gtest
# CHECK: Failed: 1
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/googletest-discovery-failed.py |
# Check that -vv makes the line number of the failing RUN command clear.
# (-v is actually sufficient in the case of the internal shell.)
#
# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
# CHECK: Testing: 4 tests
# In the case of the external shell, we check only for RUN lines in stderr in
# case some shell implementations format "set -x" output differently.
# CHECK-LABEL: FAIL: shtest-run-at-line :: external-shell/basic.txt
# CHECK: Script:
# CHECK: RUN: at line 4{{.*}} true
# CHECK-NEXT: RUN: at line 5{{.*}} false
# CHECK-NEXT: RUN: at line 6{{.*}} true
# CHECK: RUN: at line 4
# CHECK: RUN: at line 5
# CHECK-NOT: RUN
# CHECK-LABEL: FAIL: shtest-run-at-line :: external-shell/line-continuation.txt
# CHECK: Script:
# CHECK: RUN: at line 4{{.*}} echo 'foo bar' | FileCheck
# CHECK-NEXT: RUN: at line 6{{.*}} echo 'foo baz' | FileCheck
# CHECK-NEXT: RUN: at line 9{{.*}} echo 'foo bar' | FileCheck
# CHECK: RUN: at line 4
# CHECK: RUN: at line 6
# CHECK-NOT: RUN
# CHECK-LABEL: FAIL: shtest-run-at-line :: internal-shell/basic.txt
# CHECK: Script:
# CHECK: : 'RUN: at line 1'; true
# CHECK-NEXT: : 'RUN: at line 2'; false
# CHECK-NEXT: : 'RUN: at line 3'; true
# CHECK: Command Output (stdout)
# CHECK: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ "true"
# CHECK-NEXT: $ ":" "RUN: at line 2"
# CHECK-NEXT: $ "false"
# CHECK-NOT: RUN
# CHECK-LABEL: FAIL: shtest-run-at-line :: internal-shell/line-continuation.txt
# CHECK: Script:
# CHECK: : 'RUN: at line 1'; : first line continued to second line
# CHECK-NEXT: : 'RUN: at line 3'; echo 'foo bar' | FileCheck
# CHECK-NEXT: : 'RUN: at line 5'; echo 'foo baz' | FileCheck
# CHECK-NEXT: : 'RUN: at line 8'; echo 'foo bar' | FileCheck
# CHECK: Command Output (stdout)
# CHECK: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ ":" "first" "line" "continued" "to" "second" "line"
# CHECK-NEXT: $ ":" "RUN: at line 3"
# CHECK-NEXT: $ "echo" "foo bar"
# CHECK-NEXT: $ "FileCheck" "{{.*}}"
# CHECK-NEXT: $ ":" "RUN: at line 5"
# CHECK-NEXT: $ "echo" "foo baz"
# CHECK-NEXT: $ "FileCheck" "{{.*}}"
# CHECK-NOT: RUN
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shtest-run-at-line.py |
# Check the internal shell handling component of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-shell > %t.out
# FIXME: Temporarily dump test output so we can debug failing tests on
# buildbots.
# RUN: cat %t.out
# RUN: FileCheck --input-file %t.out %s
#
# Test again in non-UTF shell to catch potential errors with python 2 seen
# on stdout-encoding.txt
# RUN: env PYTHONIOENCODING=ascii not %{lit} -j 1 -a %{inputs}/shtest-shell > %t.ascii.out
# FIXME: Temporarily dump test output so we can debug failing tests on
# buildbots.
# RUN: cat %t.ascii.out
# RUN: FileCheck --input-file %t.ascii.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-shell :: cat-error-0.txt
# CHECK: *** TEST 'shtest-shell :: cat-error-0.txt' FAILED ***
# CHECK: $ "cat" "-b" "temp1.txt"
# CHECK: # command stderr:
# CHECK: Unsupported: 'cat': option -b not recognized
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: cat-error-1.txt
# CHECK: *** TEST 'shtest-shell :: cat-error-1.txt' FAILED ***
# CHECK: $ "cat" "temp1.txt"
# CHECK: # command stderr:
# CHECK: [Errno 2] No such file or directory: 'temp1.txt'
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: colon-error.txt
# CHECK: *** TEST 'shtest-shell :: colon-error.txt' FAILED ***
# CHECK: $ ":"
# CHECK: # command stderr:
# CHECK: Unsupported: ':' cannot be part of a pipeline
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# CHECK: PASS: shtest-shell :: dev-null.txt
# CHECK: FAIL: shtest-shell :: diff-b.txt
# CHECK: *** TEST 'shtest-shell :: diff-b.txt' FAILED ***
# CHECK: $ "diff" "-b" "{{[^"]*}}.0" "{{[^"]*}}.1"
# CHECK: # command output:
# CHECK: 1,2
# CHECK-NEXT: {{^ }}f o o
# CHECK-NEXT: ! b a r
# CHECK-NEXT: ---
# CHECK-NEXT: {{^ }}f o o
# CHECK-NEXT: ! bar
# CHECK-EMPTY:
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-encodings.txt
# CHECK: *** TEST 'shtest-shell :: diff-encodings.txt' FAILED ***
# CHECK: $ "diff" "-u" "diff-in.bin" "diff-in.bin"
# CHECK-NOT: error
# CHECK: $ "diff" "-u" "diff-in.utf16" "diff-in.bin"
# CHECK: # command output:
# CHECK-NEXT: ---
# CHECK-NEXT: +++
# CHECK-NEXT: @@
# CHECK-NEXT: {{^ .f.o.o.$}}
# CHECK-NEXT: {{^-.b.a.r.$}}
# CHECK-NEXT: {{^\+.b.a.r.}}
# CHECK-NEXT: {{^ .b.a.z.$}}
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "diff" "-u" "diff-in.utf8" "diff-in.bin"
# CHECK: # command output:
# CHECK-NEXT: ---
# CHECK-NEXT: +++
# CHECK-NEXT: @@
# CHECK-NEXT: -foo
# CHECK-NEXT: -bar
# CHECK-NEXT: -baz
# CHECK-NEXT: {{^\+.f.o.o.$}}
# CHECK-NEXT: {{^\+.b.a.r.}}
# CHECK-NEXT: {{^\+.b.a.z.$}}
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "diff" "-u" "diff-in.bin" "diff-in.utf8"
# CHECK: # command output:
# CHECK-NEXT: ---
# CHECK-NEXT: +++
# CHECK-NEXT: @@
# CHECK-NEXT: {{^\-.f.o.o.$}}
# CHECK-NEXT: {{^\-.b.a.r.}}
# CHECK-NEXT: {{^\-.b.a.z.$}}
# CHECK-NEXT: +foo
# CHECK-NEXT: +bar
# CHECK-NEXT: +baz
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "cat" "diff-in.bin"
# CHECK-NOT: error
# CHECK: $ "diff" "-u" "-" "diff-in.bin"
# CHECK-NOT: error
# CHECK: $ "cat" "diff-in.bin"
# CHECK-NOT: error
# CHECK: $ "diff" "-u" "diff-in.bin" "-"
# CHECK-NOT: error
# CHECK: $ "cat" "diff-in.bin"
# CHECK-NOT: error
# CHECK: $ "diff" "-u" "diff-in.utf16" "-"
# CHECK: # command output:
# CHECK-NEXT: ---
# CHECK-NEXT: +++
# CHECK-NEXT: @@
# CHECK-NEXT: {{^ .f.o.o.$}}
# CHECK-NEXT: {{^-.b.a.r.$}}
# CHECK-NEXT: {{^\+.b.a.r.}}
# CHECK-NEXT: {{^ .b.a.z.$}}
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "cat" "diff-in.bin"
# CHECK-NOT: error
# CHECK: $ "diff" "-u" "diff-in.utf8" "-"
# CHECK: # command output:
# CHECK-NEXT: ---
# CHECK-NEXT: +++
# CHECK-NEXT: @@
# CHECK-NEXT: -foo
# CHECK-NEXT: -bar
# CHECK-NEXT: -baz
# CHECK-NEXT: {{^\+.f.o.o.$}}
# CHECK-NEXT: {{^\+.b.a.r.}}
# CHECK-NEXT: {{^\+.b.a.z.$}}
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "diff" "-u" "-" "diff-in.utf8"
# CHECK: # command output:
# CHECK-NEXT: ---
# CHECK-NEXT: +++
# CHECK-NEXT: @@
# CHECK-NEXT: {{^\-.f.o.o.$}}
# CHECK-NEXT: {{^\-.b.a.r.}}
# CHECK-NEXT: {{^\-.b.a.z.$}}
# CHECK-NEXT: +foo
# CHECK-NEXT: +bar
# CHECK-NEXT: +baz
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "false"
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-error-1.txt
# CHECK: *** TEST 'shtest-shell :: diff-error-1.txt' FAILED ***
# CHECK: $ "diff" "-B" "temp1.txt" "temp2.txt"
# CHECK: # command stderr:
# CHECK: Unsupported: 'diff': option -B not recognized
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-error-2.txt
# CHECK: *** TEST 'shtest-shell :: diff-error-2.txt' FAILED ***
# CHECK: $ "diff" "temp.txt"
# CHECK: # command stderr:
# CHECK: Error: missing or extra operand
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-error-3.txt
# CHECK: *** TEST 'shtest-shell :: diff-error-3.txt' FAILED ***
# CHECK: $ "diff" "temp.txt" "temp1.txt"
# CHECK: # command stderr:
# CHECK: Error: 'diff' command failed
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-error-4.txt
# CHECK: *** TEST 'shtest-shell :: diff-error-4.txt' FAILED ***
# CHECK: Exit Code: 1
# CHECK: # command output:
# CHECK: diff-error-4.txt.tmp
# CHECK: diff-error-4.txt.tmp1
# CHECK: *** 1 ****
# CHECK: ! hello-first
# CHECK: --- 1 ----
# CHECK: ! hello-second
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-error-5.txt
# CHECK: *** TEST 'shtest-shell :: diff-error-5.txt' FAILED ***
# CHECK: $ "diff"
# CHECK: # command stderr:
# CHECK: Error: missing or extra operand
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-error-6.txt
# CHECK: *** TEST 'shtest-shell :: diff-error-6.txt' FAILED ***
# CHECK: $ "diff"
# CHECK: # command stderr:
# CHECK: Error: missing or extra operand
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-pipes.txt
# CHECK: *** TEST 'shtest-shell :: diff-pipes.txt' FAILED ***
# CHECK: $ "diff" "{{[^"]*}}.foo" "{{[^"]*}}.foo"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "FileCheck"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "diff" "-u" "{{[^"]*}}.foo" "{{[^"]*}}.bar"
# CHECK: note: command had no output on stdout or stderr
# CHECK: error: command failed with exit status: 1
# CHECK: $ "FileCheck"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "true"
# CHECK: $ "cat" "{{[^"]*}}.foo"
# CHECK: $ "diff" "-u" "-" "{{[^"]*}}.foo"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "cat" "{{[^"]*}}.foo"
# CHECK: $ "diff" "-u" "{{[^"]*}}.foo" "-"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "cat" "{{[^"]*}}.bar"
# CHECK: $ "diff" "-u" "{{[^"]*}}.foo" "-"
# CHECK: # command output:
# CHECK: @@
# CHECK-NEXT: -foo
# CHECK-NEXT: +bar
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "cat" "{{[^"]*}}.bar"
# CHECK: $ "diff" "-u" "-" "{{[^"]*}}.foo"
# CHECK: # command output:
# CHECK: @@
# CHECK-NEXT: -bar
# CHECK-NEXT: +foo
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "cat" "{{[^"]*}}.foo"
# CHECK: $ "diff" "-" "{{[^"]*}}.foo"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "FileCheck"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "cat" "{{[^"]*}}.bar"
# CHECK: $ "diff" "-u" "{{[^"]*}}.foo" "-"
# CHECK: note: command had no output on stdout or stderr
# CHECK: error: command failed with exit status: 1
# CHECK: $ "FileCheck"
# CHECK-NOT: note
# CHECK-NOT: error
# CHECK: $ "true"
# CHECK: $ "false"
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-r-error-0.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-0.txt' FAILED ***
# CHECK: $ "diff" "-r"
# CHECK: # command output:
# CHECK: Only in {{.*}}dir1: dir1unique
# CHECK: Only in {{.*}}dir2: dir2unique
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-1.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-1.txt' FAILED ***
# CHECK: $ "diff" "-r"
# CHECK: # command output:
# CHECK: *** {{.*}}dir1{{.*}}subdir{{.*}}f01
# CHECK: --- {{.*}}dir2{{.*}}subdir{{.*}}f01
# CHECK: 12345
# CHECK: 00000
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-2.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-2.txt' FAILED ***
# CHECK: $ "diff" "-r"
# CHECK: # command output:
# CHECK: Only in {{.*}}dir2: extrafile
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-3.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-3.txt' FAILED ***
# CHECK: $ "diff" "-r"
# CHECK: # command output:
# CHECK: Only in {{.*}}dir1: extra_subdir
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-4.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-4.txt' FAILED ***
# CHECK: $ "diff" "-r"
# CHECK: # command output:
# CHECK: File {{.*}}dir1{{.*}}extra_subdir is a directory while file {{.*}}dir2{{.*}}extra_subdir is a regular file
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-5.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-5.txt' FAILED ***
# CHECK: $ "diff" "-r"
# CHECK: # command output:
# CHECK: Only in {{.*}}dir1: extra_subdir
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-6.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-6.txt' FAILED ***
# CHECK: $ "diff" "-r"
# CHECK: # command output:
# CHECK: File {{.*}}dir1{{.*}}extra_file is a regular empty file while file {{.*}}dir2{{.*}}extra_file is a directory
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-7.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-7.txt' FAILED ***
# CHECK: $ "diff" "-r" "-" "{{[^"]*}}"
# CHECK: # command stderr:
# CHECK: Error: cannot recursively compare '-'
# CHECK: error: command failed with exit status: 1
# CHECK: FAIL: shtest-shell :: diff-r-error-8.txt
# CHECK: *** TEST 'shtest-shell :: diff-r-error-8.txt' FAILED ***
# CHECK: $ "diff" "-r" "{{[^"]*}}" "-"
# CHECK: # command stderr:
# CHECK: Error: cannot recursively compare '-'
# CHECK: error: command failed with exit status: 1
# CHECK: PASS: shtest-shell :: diff-r.txt
# CHECK: FAIL: shtest-shell :: diff-strip-trailing-cr.txt
# CHECK: *** TEST 'shtest-shell :: diff-strip-trailing-cr.txt' FAILED ***
# CHECK: $ "diff" "-u" "diff-in.dos" "diff-in.unix"
# CHECK: # command output:
# CHECK: @@
# CHECK-NEXT: -In this file, the
# CHECK-NEXT: -sequence "\r\n"
# CHECK-NEXT: -terminates lines.
# CHECK-NEXT: +In this file, the
# CHECK-NEXT: +sequence "\n"
# CHECK-NEXT: +terminates lines.
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "diff" "-u" "diff-in.unix" "diff-in.dos"
# CHECK: # command output:
# CHECK: @@
# CHECK-NEXT: -In this file, the
# CHECK-NEXT: -sequence "\n"
# CHECK-NEXT: -terminates lines.
# CHECK-NEXT: +In this file, the
# CHECK-NEXT: +sequence "\r\n"
# CHECK-NEXT: +terminates lines.
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "diff" "-u" "--strip-trailing-cr" "diff-in.dos" "diff-in.unix"
# CHECK: # command output:
# CHECK: @@
# CHECK-NEXT: In this file, the
# CHECK-NEXT: -sequence "\r\n"
# CHECK-NEXT: +sequence "\n"
# CHECK-NEXT: terminates lines.
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "diff" "-u" "--strip-trailing-cr" "diff-in.unix" "diff-in.dos"
# CHECK: # command output:
# CHECK: @@
# CHECK-NEXT: In this file, the
# CHECK-NEXT: -sequence "\n"
# CHECK-NEXT: +sequence "\r\n"
# CHECK-NEXT: terminates lines.
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "false"
# CHECK: ***
# CHECK: FAIL: shtest-shell :: diff-unified.txt
# CHECK: *** TEST 'shtest-shell :: diff-unified.txt' FAILED ***
# CHECK: $ "diff" "-u" "{{[^"]*}}.foo" "{{[^"]*}}.bar"
# CHECK: # command output:
# CHECK: @@ {{.*}} @@
# CHECK-NEXT: 3
# CHECK-NEXT: 4
# CHECK-NEXT: 5
# CHECK-NEXT: -6 foo
# CHECK-NEXT: +6 bar
# CHECK-NEXT: 7
# CHECK-NEXT: 8
# CHECK-NEXT: 9
# CHECK-EMPTY:
# CHECK-NEXT: error: command failed with exit status: 1
# CHECK-NEXT: $ "true"
# CHECK: $ "diff" "-U" "2" "{{[^"]*}}.foo" "{{[^"]*}}.bar"
# CHECK: # command output:
# CHECK: @@ {{.*}} @@
# CHECK-NEXT: 4
# CHECK-NEXT: 5
# CHECK-NEXT: -6 foo
# CHECK-NEXT: +6 bar
# CHECK-NEXT: 7
# CHECK-NEXT: 8
# CHECK-EMPTY:
# CHECK-NEXT: error: command failed with exit status: 1
# CHECK-NEXT: $ "true"
# CHECK: $ "diff" "-U4" "{{[^"]*}}.foo" "{{[^"]*}}.bar"
# CHECK: # command output:
# CHECK: @@ {{.*}} @@
# CHECK-NEXT: 2
# CHECK-NEXT: 3
# CHECK-NEXT: 4
# CHECK-NEXT: 5
# CHECK-NEXT: -6 foo
# CHECK-NEXT: +6 bar
# CHECK-NEXT: 7
# CHECK-NEXT: 8
# CHECK-NEXT: 9
# CHECK-NEXT: 10
# CHECK-EMPTY:
# CHECK-NEXT: error: command failed with exit status: 1
# CHECK-NEXT: $ "true"
# CHECK: $ "diff" "-U0" "{{[^"]*}}.foo" "{{[^"]*}}.bar"
# CHECK: # command output:
# CHECK: @@ {{.*}} @@
# CHECK-NEXT: -6 foo
# CHECK-NEXT: +6 bar
# CHECK-EMPTY:
# CHECK-NEXT: error: command failed with exit status: 1
# CHECK-NEXT: $ "true"
# CHECK: $ "diff" "-U" "30.1" "{{[^"]*}}" "{{[^"]*}}"
# CHECK: # command stderr:
# CHECK: Error: invalid '-U' argument: 30.1
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "diff" "-U-1" "{{[^"]*}}" "{{[^"]*}}"
# CHECK: # command stderr:
# CHECK: Error: invalid '-U' argument: -1
# CHECK: error: command failed with exit status: 1
# CHECK: $ "true"
# CHECK: $ "false"
# CHECK: ***
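#
# The two rejections above ('30.1' and '-1') exercise lit's validation of
# the '-U' context count: only non-negative integers are accepted. A minimal
# sketch of such a check, with a hypothetical helper name (not lit's actual
# implementation):

def parse_unified_context(value):
    """Parse a '-U' argument; reject non-integers and negative counts."""
    try:
        count = int(value)
    except ValueError:
        raise ValueError("invalid '-U' argument: %s" % value)
    if count < 0:
        raise ValueError("invalid '-U' argument: %s" % value)
    return count

# parse_unified_context("2") == 2; "30.1" and "-1" both raise, as above.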
# CHECK: FAIL: shtest-shell :: diff-w.txt
# CHECK: *** TEST 'shtest-shell :: diff-w.txt' FAILED ***
# CHECK: $ "diff" "-w" "{{[^"]*}}.0" "{{[^"]*}}.1"
# CHECK: # command output:
# CHECK: 1,3
# CHECK-NEXT: {{^ }}foo
# CHECK-NEXT: {{^ }}bar
# CHECK-NEXT: ! baz
# CHECK-NEXT: ---
# CHECK-NEXT: {{^ }}foo
# CHECK-NEXT: {{^ }}bar
# CHECK-NEXT: ! bat
# CHECK-EMPTY:
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: error-0.txt
# CHECK: *** TEST 'shtest-shell :: error-0.txt' FAILED ***
# CHECK: $ "not-a-real-command"
# CHECK: # command stderr:
# CHECK: 'not-a-real-command': command not found
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# FIXME: The output here is unhelpful.
#
# CHECK: FAIL: shtest-shell :: error-1.txt
# CHECK: *** TEST 'shtest-shell :: error-1.txt' FAILED ***
# CHECK: shell parser error on: ': \'RUN: at line 3\'; echo "missing quote'
# CHECK: ***
# CHECK: FAIL: shtest-shell :: error-2.txt
# CHECK: *** TEST 'shtest-shell :: error-2.txt' FAILED ***
# CHECK: Unsupported redirect:
# CHECK: ***
# CHECK: FAIL: shtest-shell :: mkdir-error-0.txt
# CHECK: *** TEST 'shtest-shell :: mkdir-error-0.txt' FAILED ***
# CHECK: $ "mkdir" "-p" "temp"
# CHECK: # command stderr:
# CHECK: Unsupported: 'mkdir' cannot be part of a pipeline
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# CHECK: FAIL: shtest-shell :: mkdir-error-1.txt
# CHECK: *** TEST 'shtest-shell :: mkdir-error-1.txt' FAILED ***
# CHECK: $ "mkdir" "-p" "-m" "777" "temp"
# CHECK: # command stderr:
# CHECK: Unsupported: 'mkdir': option -m not recognized
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# CHECK: FAIL: shtest-shell :: mkdir-error-2.txt
# CHECK: *** TEST 'shtest-shell :: mkdir-error-2.txt' FAILED ***
# CHECK: $ "mkdir" "-p"
# CHECK: # command stderr:
# CHECK: Error: 'mkdir' is missing an operand
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# CHECK: PASS: shtest-shell :: redirects.txt
# CHECK: FAIL: shtest-shell :: rm-error-0.txt
# CHECK: *** TEST 'shtest-shell :: rm-error-0.txt' FAILED ***
# CHECK: $ "rm" "-rf" "temp"
# CHECK: # command stderr:
# CHECK: Unsupported: 'rm' cannot be part of a pipeline
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# CHECK: FAIL: shtest-shell :: rm-error-1.txt
# CHECK: *** TEST 'shtest-shell :: rm-error-1.txt' FAILED ***
# CHECK: $ "rm" "-f" "-v" "temp"
# CHECK: # command stderr:
# CHECK: Unsupported: 'rm': option -v not recognized
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# CHECK: FAIL: shtest-shell :: rm-error-2.txt
# CHECK: *** TEST 'shtest-shell :: rm-error-2.txt' FAILED ***
# CHECK: $ "rm" "-r" "hello"
# CHECK: # command stderr:
# CHECK: Error: 'rm' command failed
# CHECK: error: command failed with exit status: 1
# CHECK: ***
# CHECK: FAIL: shtest-shell :: rm-error-3.txt
# CHECK: *** TEST 'shtest-shell :: rm-error-3.txt' FAILED ***
# CHECK: Exit Code: 1
# CHECK: ***
# CHECK: PASS: shtest-shell :: rm-unicode-0.txt
# CHECK: PASS: shtest-shell :: sequencing-0.txt
# CHECK: XFAIL: shtest-shell :: sequencing-1.txt
# CHECK: FAIL: shtest-shell :: stdout-encoding.txt
# CHECK: *** TEST 'shtest-shell :: stdout-encoding.txt' FAILED ***
# CHECK: $ "cat" "diff-in.bin"
# CHECK: # command output:
# CHECK-NEXT: {{^.f.o.o.$}}
# CHECK-NEXT: {{^.b.a.r.}}
# CHECK-NEXT: {{^.b.a.z.$}}
# CHECK-NOT: error
# CHECK: $ "false"
# CHECK: ***
# CHECK: PASS: shtest-shell :: valid-shell.txt
# CHECK: Failed Tests (35)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shtest-shell.py |
# Test features related to formats which support reporting additional test data
# and multiple test results.
# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro | FileCheck %s
# CHECK: -- Testing:
# CHECK: PASS: test-data-micro :: micro-tests.ini
# CHECK-NEXT: *** TEST 'test-data-micro :: micro-tests.ini' RESULTS ***
# CHECK-NEXT: value0: 1
# CHECK-NEXT: value1: 2.3456
# CHECK-NEXT: ***
# CHECK-NEXT: *** MICRO-TEST: test0
# CHECK-NEXT: micro_value0: 4
# CHECK-NEXT: micro_value1: 1.3
# CHECK-NEXT: *** MICRO-TEST: test1
# CHECK-NEXT: micro_value0: 4
# CHECK-NEXT: micro_value1: 1.3
# CHECK-NEXT: *** MICRO-TEST: test2
# CHECK-NEXT: micro_value0: 4
# CHECK-NEXT: micro_value1: 1.3
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/test-data-micro.py |
# RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
# RUN: FileCheck < %t.results.out %s
# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": {{[0-9.]+}},
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "value0": 1,
# CHECK-NEXT: "value1": 2.3456
# CHECK-NEXT: }
# CHECK-NEXT: "name": "test-data :: metrics.ini",
# CHECK-NEXT: "output": "Test passed."
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }
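#
# A minimal sketch of consuming the JSON results file that --output writes;
# the field names follow the checks above, and the path is illustrative:

import json

def summarize_results(path):
    """Print each test's name and result code from a lit results file."""
    with open(path) as f:
        data = json.load(f)
    for test in data["tests"]:
        print("%s: %s" % (test["name"], test["code"]))

# summarize_results("results.out") would print, for the run above:
#   test-data :: metrics.ini: PASS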
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/test-output.py |
# Check the various features of the ShTest format.
#
# RUN: rm -f %t.xml
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out
# RUN: FileCheck < %t.out %s
# RUN: FileCheck --check-prefix=XUNIT < %t.xml %s
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: Command Output (stderr):
# CHECK-NEXT: --
# CHECK-NEXT: cat{{(\.exe)?}}: {{cannot open does-not-exist|does-not-exist: No such file or directory}}
# CHECK: --
# CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: a line with bad encoding:
# CHECK: --
# CHECK: FAIL: shtest-format :: external_shell/fail_with_control_chars.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_control_chars.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: a line with {{.*}}control characters{{.*}}.
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: printf "line 1
# CHECK-NEXT: false
# CHECK-NEXT: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ "printf"
# CHECK-NEXT: # command output:
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
# CHECK: UNRESOLVED: shtest-format :: requires-star.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-triple.txt
# CHECK: PASS: shtest-format :: unsupported-expr-false.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported-expr-true.txt
# CHECK: UNRESOLVED: shtest-format :: unsupported-star.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
# CHECK: PASS: shtest-format :: xfail-expr-false.txt
# CHECK: XFAIL: shtest-format :: xfail-expr-true.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
# CHECK-NEXT: *** TEST 'shtest-format :: xpass.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: true
# CHECK-NEXT: --
# CHECK: Failed Tests (4)
# CHECK: shtest-format :: external_shell/fail.txt
# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: external_shell/fail_with_control_chars.txt
# CHECK: shtest-format :: fail.txt
# CHECK: Unexpectedly Passed Tests (1)
# CHECK: shtest-format :: xpass.txt
# CHECK: Testing Time:
# CHECK: Unsupported : 4
# CHECK: Passed : 6
# CHECK: Expectedly Failed : 4
# CHECK: Unresolved : 3
# CHECK: Failed : 4
# CHECK: Unexpectedly Passed: 1
# XUNIT: <?xml version="1.0" encoding="UTF-8"?>
# XUNIT-NEXT: <testsuites time="{{[0-9.]+}}">
# XUNIT-NEXT: <testsuite name="shtest-format" tests="22" failures="8" skipped="4">
# XUNIT: <testcase classname="shtest-format.external_shell" name="fail.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.external_shell" name="fail_with_bad_encoding.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.external_shell" name="fail_with_control_chars.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure><![CDATA[Script:
# XUNIT: Command Output (stdout):
# XUNIT-NEXT: --
# XUNIT-NEXT: a line with [2;30;41mcontrol characters[0m.
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.external_shell" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="fail.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="no-test-line.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-missing.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Missing required feature(s): a-missing-feature"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-present.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-star.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-triple.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Missing required feature(s): x86_64"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-true.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Unsupported configuration"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-star.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.unsupported_dir" name="some-test.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Unsupported configuration"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-true.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-feature.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-target.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xpass.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: </testsuite>
# XUNIT-NEXT: </testsuites>
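#
# The XUNIT checks above pin down how lit maps result codes onto JUnit XML:
# failing codes (FAIL, XPASS, UNRESOLVED) gain a <failure> child, UNSUPPORTED
# gets <skipped/>, and passing codes stay bare <testcase/> elements. A rough
# sketch of that mapping (hedged; this is not lit's xunit writer):

def xunit_child_element(code):
    """Return the child element a lit result code produces, if any."""
    if code in ("FAIL", "XPASS", "UNRESOLVED"):
        return "failure"
    if code == "UNSUPPORTED":
        return "skipped"
    return None  # PASS and XFAIL produce a plain testcase element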
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shtest-format.py |
# Check the various features of the GoogleTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-format > %t.out
# FIXME: Temporarily dump test output so we can debug failing tests on
# buildbots.
# RUN: cat %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestA
# CHECK: FAIL: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestB
# CHECK-NEXT: *** TEST 'googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestB' FAILED ***
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK: ***
# CHECK: PASS: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/1.subTest
# CHECK: Failed Tests (1)
# CHECK: Passed: 3
# CHECK: Failed: 1
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/googletest-format.py |
# RUN: %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-BASIC %s
# CHECK-BASIC: Testing: 5 tests
# Check that we exit with an error if we do not discover any tests, even with --allow-empty-runs.
#
# RUN: not %{lit} %{inputs}/nonexistent 2>&1 | FileCheck --check-prefix=CHECK-BAD-PATH %s
# RUN: not %{lit} %{inputs}/nonexistent --allow-empty-runs 2>&1 | FileCheck --check-prefix=CHECK-BAD-PATH %s
# CHECK-BAD-PATH: error: did not discover any tests for provided path(s)
# Check that we exit with an error if we filter out all tests, but allow it with --allow-empty-runs.
#
# RUN: not %{lit} --filter 'nonexistent' %{inputs}/discovery 2>&1 | FileCheck --check-prefixes=CHECK-BAD-FILTER,CHECK-BAD-FILTER-ERROR %s
# RUN: %{lit} --filter 'nonexistent' --allow-empty-runs %{inputs}/discovery 2>&1 | FileCheck --check-prefixes=CHECK-BAD-FILTER,CHECK-BAD-FILTER-ALLOW %s
# CHECK-BAD-FILTER: error: filter did not match any tests (of 5 discovered).
# CHECK-BAD-FILTER-ERROR: Use '--allow-empty-runs' to suppress this error.
# CHECK-BAD-FILTER-ALLOW: Suppressing error because '--allow-empty-runs' was specified.
# Check that regex-filtering works, is case-insensitive, and can be configured via env var.
#
# RUN: %{lit} --filter 'o[a-z]e' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: %{lit} --filter 'O[A-Z]E' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# CHECK-FILTER: Testing: 2 of 5 tests
# CHECK-FILTER: Excluded: 3
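#
# A minimal sketch of the selection behavior the three runs above verify:
# the filter is a regular expression matched case-insensitively against each
# test's name (this mirrors the observed behavior, not lit's code):

import re

def matches_filter(test_name, pattern):
    """Return True if the case-insensitive regex matches the test name."""
    return re.search(pattern, test_name, re.IGNORECASE) is not None

# With this rule, 'o[a-z]e' and 'O[A-Z]E' select the same 2 of 5 tests.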
# Check that maximum counts work
#
# RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
# CHECK-MAX: Testing: 3 of 5 tests
# CHECK-MAX: Excluded: 2
# Check that sharding partitions the testsuite in a way that distributes the
# rounding error evenly (i.e. 5/3 => 2 2 1, not 1 1 3); a sketch of the
# selection rule follows the three runs below.
#
# RUN: %{lit} --num-shards 3 --run-shard 1 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
# CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
# CHECK-SHARD0-OUT: Testing: 2 of 5 tests
# CHECK-SHARD0-OUT: Excluded: 3
#
# RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD1-OUT < %t.out %s
# CHECK-SHARD1-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
# CHECK-SHARD1-OUT: Testing: 2 of 5 tests
#
# RUN: %{lit} --num-shards 3 --run-shard 3 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD2-OUT < %t.out %s
# CHECK-SHARD2-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
# CHECK-SHARD2-OUT: Testing: 1 of 5 tests
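#
# The shard notes above spell out the selection rule: shard i of n keeps
# every n-th test starting at test i, so 5 tests over 3 shards split 2/2/1.
# A minimal sketch of that partition (not lit's implementation):

def select_shard(tests, run_shard, num_shards):
    """Keep tests run_shard, run_shard + num_shards, ... (1-based)."""
    return [t for idx, t in enumerate(tests, start=1)
            if (idx - run_shard) % num_shards == 0]

# select_shard([1, 2, 3, 4, 5], 1, 3) == [1, 4]
# select_shard([1, 2, 3, 4, 5], 3, 3) == [3]
# select_shard([1, 2, 3, 4, 5], 6, 100) == []   (the empty-shard warning case)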
# Check that sharding via env vars works.
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=1 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-OUT < %t.out %s
# CHECK-SHARD0-ENV-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
# CHECK-SHARD0-ENV-OUT: Testing: 2 of 5 tests
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=2 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-OUT < %t.out %s
# CHECK-SHARD1-ENV-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
# CHECK-SHARD1-ENV-OUT: Testing: 2 of 5 tests
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=3 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-OUT < %t.out %s
# CHECK-SHARD2-ENV-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
# CHECK-SHARD2-ENV-OUT: Testing: 1 of 5 tests
# Check that providing more shards than tests results in 1 test per shard
# until we run out, then 0.
#
# RUN: %{lit} --num-shards 100 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR1 < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT1 < %t.out %s
# CHECK-SHARD-BIG-ERR1: note: Selecting shard 2/100 = size 1/5 = tests #(100*k)+2 = [2]
# CHECK-SHARD-BIG-OUT1: Testing: 1 of 5 tests
#
# RUN: %{lit} --num-shards 100 --run-shard 6 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR2 < %t.err %s
# CHECK-SHARD-BIG-ERR2: note: Selecting shard 6/100 = size 0/5 = tests #(100*k)+6 = []
# CHECK-SHARD-BIG-ERR2: warning: shard does not contain any tests. Consider decreasing the number of shards.
#
# RUN: %{lit} --num-shards 100 --run-shard 50 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR3 < %t.err %s
# CHECK-SHARD-BIG-ERR3: note: Selecting shard 50/100 = size 0/5 = tests #(100*k)+50 = []
# CHECK-SHARD-BIG-ERR3: warning: shard does not contain any tests. Consider decreasing the number of shards.
# Check that range constraints are enforced
#
# RUN: not %{lit} --num-shards 0 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR < %t.err %s
# CHECK-SHARD-ERR: error: argument --num-shards: requires positive integer, but found '0'
#
# RUN: not %{lit} --num-shards 3 --run-shard 4 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR2 < %t.err %s
# CHECK-SHARD-ERR2: error: --run-shard must be between 1 and --num-shards (inclusive)
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/selecting.py |
# Check the simple progress bar.
#
# RUN: not %{lit} -j 1 -s %{inputs}/progress-bar > %t.out
# RUN: FileCheck < %t.out %s
#
# CHECK: Testing:
# CHECK: FAIL: progress-bar :: test-1.txt (1 of 4)
# CHECK: Testing: 0.. 10.. 20
# CHECK: FAIL: progress-bar :: test-2.txt (2 of 4)
# CHECK: Testing: 0.. 10.. 20.. 30.. 40..
# CHECK: FAIL: progress-bar :: test-3.txt (3 of 4)
# CHECK: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70
# CHECK: FAIL: progress-bar :: test-4.txt (4 of 4)
# CHECK: Testing: 0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90..
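#
# Each "Testing:" line above is a prefix of the full gauge
# "0.. 10.. 20.. ... 90..", cut off in proportion to how many tests have
# finished. A rough sketch of that rendering (an approximation of the
# observed output, not lit's ProgressBar class):

def render_gauge(completed, total):
    """Return the leading part of the gauge proportional to progress."""
    gauge = " ".join("%d.." % d for d in range(0, 100, 10))
    return gauge[: len(gauge) * completed // total]

# render_gauge(3, 4) == "0.. 10.. 20.. 30.. 40.. 50.. 60.. 70"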
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/progress-bar.py |
# UNSUPPORTED: system-windows
# Check the behavior of --max-failures option.
#
# RUN: not %{lit} -j 1 %{inputs}/max-failures > %t.out 2>&1
# RUN: not %{lit} --max-failures=1 -j 1 %{inputs}/max-failures >> %t.out 2>&1
# RUN: not %{lit} --max-failures=2 -j 1 %{inputs}/max-failures >> %t.out 2>&1
# RUN: not %{lit} --max-failures=0 -j 1 %{inputs}/max-failures 2>> %t.out
# RUN: FileCheck < %t.out %s
#
# CHECK-NOT: reached maximum number of test failures
# CHECK-NOT: Skipped
# CHECK: Failed: 3
# CHECK: reached maximum number of test failures, skipping remaining tests
# CHECK: Skipped: 2
# CHECK: Failed : 1
# CHECK: reached maximum number of test failures, skipping remaining tests
# CHECK: Skipped: 1
# CHECK: Failed : 2
# CHECK: error: argument --max-failures: requires positive integer, but found '0'
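#
# A minimal sketch of the cutoff the runs above demonstrate: after
# --max-failures failures, the remaining tests are skipped (names here are
# illustrative; this is not lit's scheduler):

def run_with_max_failures(tests, max_failures=None):
    """Run callables until the failure limit is hit; skip the rest."""
    failed = skipped = 0
    for idx, test in enumerate(tests):
        if max_failures is not None and failed >= max_failures:
            skipped = len(tests) - idx
            break
        if not test():
            failed += 1
    return failed, skipped

# Three always-failing tests give (3, 0) with no limit, (1, 2) with
# --max-failures=1, and (2, 1) with --max-failures=2, matching the counts above.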
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/max-failures.py |
# REQUIRES: lit-max-individual-test-time
###############################################################################
# Check that tests can hit the timeout when one is set
###############################################################################
# Check that the per-test timeout is enforced when running GTest tests.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
# RUN: --filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cmd.out %s
# Check that the per-test timeout is enforced when running GTest tests via
# the configuration file.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
# RUN: --filter=InfiniteLoopSubTest --param set_timeout=1 \
# RUN: > %t.cfgset.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cfgset.out %s
# CHECK-INF: -- Testing:
# CHECK-INF: TIMEOUT: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/T.InfiniteLoopSubTest
# CHECK-INF: Timed Out: 1
###############################################################################
# Check that tests can complete when a timeout is set
#
# `QuickSubTest` should execute quickly, so we should not wait anywhere near
# the 3600-second timeout.
###############################################################################
# RUN: %{lit} -j 1 -v %{inputs}/googletest-timeout \
# RUN: --filter=QuickSubTest --timeout=3600 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmd.out %s
# CHECK-QUICK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/T.QuickSubTest
# CHECK-QUICK: Passed : 1
# Test the per-test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: %{lit} -j 1 -v %{inputs}/googletest-timeout --filter=QuickSubTest \
# RUN: --param set_timeout=1 --timeout=3600 \
# RUN: > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmdover.out %s
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-ERR < %t.cmdover.err %s
# CHECK-CMDLINE-OVERRIDE-ERR: Forcing timeout to be 3600 seconds
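#
# A minimal sketch of the precedence the last run demonstrates: a timeout
# given on the command line overrides one set via test parameters (the
# function name is illustrative, not lit's options handling):

def effective_timeout(cli_timeout, param_timeout):
    """The command-line --timeout wins over a config-supplied value."""
    return cli_timeout if cli_timeout is not None else param_timeout

# effective_timeout(3600, 1) == 3600, hence "Forcing timeout to be 3600 seconds".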
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/googletest-timeout.py |
# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-keyword-parse-errors > %t.out
# RUN: FileCheck -input-file %t.out %s
#
# END.
# CHECK: Testing: 3 tests
# CHECK-LABEL: UNRESOLVED: shtest-keyword-parse-errors :: empty.txt
# CHECK: {{^}}Test has no 'RUN:' line{{$}}
# CHECK-LABEL: UNRESOLVED: shtest-keyword-parse-errors :: multiple-allow-retries.txt
# CHECK: {{^}}Test has more than one ALLOW_RETRIES lines{{$}}
# CHECK-LABEL: UNRESOLVED: shtest-keyword-parse-errors :: unterminated-run.txt
# CHECK: {{^}}Test has unterminated 'RUN:' lines (with '\'){{$}}
| MDL-SDK-master | src/mdl/jit/llvm/dist/utils/lit/tests/shtest-keyword-parse-errors.py |