python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_diff - Compare two symbol lists and output the differences.
"""
from argparse import ArgumentParser
import sys
from libcudacxx.sym_check import diff, util
def main():
parser = ArgumentParser(
description='Compare two symbol lists and output the differences.')
parser.add_argument(
'--names-only', dest='names_only',
help='Only print symbol names',
action='store_true', default=False)
parser.add_argument(
'--removed-only', dest='removed_only',
help='Only print removed symbols',
action='store_true', default=False)
parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
help="Filter all symbols not related to the stdlib",
action='store_true', default=False)
parser.add_argument('--strict', dest='strict',
help="Exit with a non-zero status if any symbols "
"differ",
action='store_true', default=False)
parser.add_argument(
'-o', '--output', dest='output',
help='The output file. stdout is used if not given',
type=str, action='store', default=None)
parser.add_argument(
'--demangle', dest='demangle', action='store_true', default=False)
parser.add_argument(
'old_syms', metavar='old-syms', type=str,
help='The file containing the old symbol list or a library')
parser.add_argument(
'new_syms', metavar='new-syms', type=str,
help='The file containing the new symbol list or a library')
args = parser.parse_args()
old_syms_list = util.extract_or_load(args.old_syms)
new_syms_list = util.extract_or_load(args.new_syms)
if args.only_stdlib:
old_syms_list, _ = util.filter_stdlib_symbols(old_syms_list)
new_syms_list, _ = util.filter_stdlib_symbols(new_syms_list)
added, removed, changed = diff.diff(old_syms_list, new_syms_list)
if args.removed_only:
added = {}
report, is_break, is_different = diff.report_diff(
added, removed, changed, names_only=args.names_only,
demangle=args.demangle)
if args.output is None:
print(report)
else:
with open(args.output, 'w') as f:
f.write(report + '\n')
exit_code = 1 if is_break or (args.strict and is_different) else 0
sys.exit(exit_code)
if __name__ == '__main__':
main()
| libcudacxx-main | .upstream-tests/utils/sym_diff.py |
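A minimal sketch of the exit-code policy the script applies above (compute_exit_code is a hypothetical helper introduced only for illustration; it is not part of the upstream tool):

def compute_exit_code(is_break, is_different, strict):
    # Removed or changed symbols (is_break) always fail; --strict promotes
    # any difference at all, including pure additions, to a failure.
    return 1 if is_break or (strict and is_different) else 0

assert compute_exit_code(is_break=False, is_different=True, strict=False) == 0
assert compute_exit_code(is_break=False, is_different=True, strict=True) == 1
assert compute_exit_code(is_break=True, is_different=True, strict=False) == 1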
#!/usr/bin/env python
import os
import tempfile
def get_libcxx_paths():
utils_path = os.path.dirname(os.path.abspath(__file__))
script_name = os.path.basename(__file__)
assert os.path.exists(utils_path)
src_root = os.path.dirname(utils_path)
include_path = os.path.join(src_root, 'include')
assert os.path.exists(include_path)
docs_path = os.path.join(src_root, 'docs')
assert os.path.exists(docs_path)
macro_test_path = os.path.join(src_root, 'test', 'std', 'language.support',
'support.limits', 'support.limits.general')
assert os.path.exists(macro_test_path)
assert os.path.exists(os.path.join(macro_test_path, 'version.version.pass.cpp'))
return script_name, src_root, include_path, docs_path, macro_test_path
script_name, source_root, include_path, docs_path, macro_test_path = get_libcxx_paths()
def has_header(h):
h_path = os.path.join(include_path, h)
return os.path.exists(h_path)
def add_version_header(tc):
tc["headers"].append("version")
return tc
feature_test_macros = sorted([ add_version_header(x) for x in [
# C++14 macros
{"name": "__cpp_lib_integer_sequence",
"values": {
"c++14": 201304L
},
"headers": ["utility"],
},
{"name": "__cpp_lib_exchange_function",
"values": {
"c++14": 201304L
},
"headers": ["utility"],
},
{"name": "__cpp_lib_tuples_by_type",
"values": {
"c++14": 201304L
},
"headers": ["utility", "tuple"],
},
{"name": "__cpp_lib_tuple_element_t",
"values": {
"c++14": 201402L
},
"headers": ["tuple"],
},
{"name": "__cpp_lib_make_unique",
"values": {
"c++14": 201304L
},
"headers": ["memory"],
},
{"name": "__cpp_lib_transparent_operators",
"values": {
"c++14": 201210L,
"c++17": 201510L,
},
"headers": ["functional"],
},
{"name": "__cpp_lib_integral_constant_callable",
"values": {
"c++14": 201304L
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_transformation_trait_aliases",
"values": {
"c++14": 201304L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_result_of_sfinae",
"values": {
"c++14": 201210L,
},
"headers": ["functional", "type_traits"]
},
{"name": "__cpp_lib_is_final",
"values": {
"c++14": 201402L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_is_null_pointer",
"values": {
"c++14": 201309L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_chrono_udls",
"values": {
"c++14": 201304L,
},
"headers": ["chrono"]
},
{"name": "__cpp_lib_string_udls",
"values": {
"c++14": 201304L,
},
"headers": ["string"]
},
{"name": "__cpp_lib_generic_associative_lookup",
"values": {
"c++14": 201304L,
},
"headers": ["map", "set"]
},
{"name": "__cpp_lib_null_iterators",
"values": {
"c++14": 201304L,
},
"headers": ["iterator"]
},
{"name": "__cpp_lib_make_reverse_iterator",
"values": {
"c++14": 201402L,
},
"headers": ["iterator"]
},
{"name": "__cpp_lib_robust_nonmodifying_seq_ops",
"values": {
"c++14": 201304L,
},
"headers": ["algorithm"]
},
{"name": "__cpp_lib_complex_udls",
"values": {
"c++14": 201309L,
},
"headers": ["complex"]
},
{"name": "__cpp_lib_constexpr_complex",
"values": {
"c++14": 201711L
},
"headers": ["complex"],
},
{"name": "__cpp_lib_quoted_string_io",
"values": {
"c++14": 201304L,
},
"headers": ["iomanip"]
},
{"name": "__cpp_lib_shared_timed_mutex",
"values": {
"c++14": 201402L,
},
"headers": ["shared_mutex"],
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
# C++17 macros
{"name": "__cpp_lib_atomic_is_always_lock_free",
"values": {
"c++17": 201603L,
},
"headers": ["atomic"],
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
{"name": "__cpp_lib_filesystem",
"values": {
"c++17": 201703L,
},
"headers": ["filesystem"]
},
{"name": "__cpp_lib_invoke",
"values": {
"c++17": 201411L,
},
"headers": ["functional"]
},
{"name": "__cpp_lib_void_t",
"values": {
"c++17": 201411L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_node_extract",
"values": {
"c++17": 201606L,
},
"headers": ["map", "set", "unordered_map", "unordered_set"]
},
{"name": "__cpp_lib_byte",
"values": {
"c++17": 201603L,
},
"headers": ["cstddef"],
},
{"name": "__cpp_lib_hardware_interference_size",
"values": {
"c++17": 201703L,
},
"headers": ["new"],
},
{"name": "__cpp_lib_launder",
"values": {
"c++17": 201606L,
},
"headers": ["new"],
},
{"name": "__cpp_lib_uncaught_exceptions",
"values": {
"c++17": 201411L,
},
"headers": ["exception"],
},
{"name": "__cpp_lib_as_const",
"values": {
"c++17": 201510L,
},
"headers": ["utility"],
},
{"name": "__cpp_lib_make_from_tuple",
"values": {
"c++17": 201606L,
},
"headers": ["tuple"],
},
{"name": "__cpp_lib_apply",
"values": {
"c++17": 201603L,
},
"headers": ["tuple"],
},
{"name": "__cpp_lib_optional",
"values": {
"c++17": 201606L,
},
"headers": ["optional"],
},
{"name": "__cpp_lib_variant",
"values": {
"c++17": 201606L,
},
"headers": ["variant"],
},
{"name": "__cpp_lib_any",
"values": {
"c++17": 201606L,
},
"headers": ["any"],
},
{"name": "__cpp_lib_addressof_constexpr",
"values": {
"c++17": 201603L,
},
"headers": ["memory"],
"depends": "TEST_HAS_BUILTIN(__builtin_addressof) || TEST_GCC_VER >= 700",
"internal_depends": "defined(_LIBCUDACXX_ADDRESSOF)",
},
{"name": "__cpp_lib_raw_memory_algorithms",
"values": {
"c++17": 201606L,
},
"headers": ["memory"],
},
{"name": "__cpp_lib_enable_shared_from_this",
"values": {
"c++17": 201603L,
},
"headers": ["memory"],
},
{"name": "__cpp_lib_shared_ptr_weak_type",
"values": {
"c++17": 201606L,
},
"headers": ["memory"],
},
{"name": "__cpp_lib_shared_ptr_arrays",
"values": {
"c++17": 201611L,
},
"headers": ["memory"],
"unimplemented": True,
},
{"name": "__cpp_lib_memory_resource",
"values": {
"c++17": 201603L,
},
"headers": ["memory_resource"],
"unimplemented": True,
},
{"name": "__cpp_lib_boyer_moore_searcher",
"values": {
"c++17": 201603L,
},
"headers": ["functional"],
"unimplemented": True,
},
{"name": "__cpp_lib_not_fn",
"values": {
"c++17": 201603L,
},
"headers": ["functional"],
},
{"name": "__cpp_lib_bool_constant",
"values": {
"c++17": 201505L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_type_trait_variable_templates",
"values": {
"c++17": 201510L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_logical_traits",
"values": {
"c++17": 201510L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_is_swappable",
"values": {
"c++17": 201603L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_is_invocable",
"values": {
"c++17": 201703L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_has_unique_object_representations",
"values": {
"c++17": 201606L,
},
"headers": ["type_traits"],
"depends": "TEST_HAS_BUILTIN_IDENTIFIER(__has_unique_object_representations) || TEST_GCC_VER >= 700",
"internal_depends": "defined(_LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS)",
},
{"name": "__cpp_lib_is_aggregate",
"values": {
"c++17": 201703L,
},
"headers": ["type_traits"],
"depends": "TEST_HAS_BUILTIN_IDENTIFIER(__is_aggregate) || TEST_GCC_VER_NEW >= 7001",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_IS_AGGREGATE)",
},
{"name": "__cpp_lib_chrono",
"values": {
"c++17": 201611L,
},
"headers": ["chrono"],
},
{"name": "__cpp_lib_execution",
"values": {
"c++17": 201603L,
},
"headers": ["execution"],
"unimplemented": True
},
{"name": "__cpp_lib_parallel_algorithm",
"values": {
"c++17": 201603L,
},
"headers": ["algorithm", "numeric"],
"unimplemented": True,
},
{"name": "__cpp_lib_to_chars",
"values": {
"c++17": 201611L,
},
"headers": ["utility"],
"unimplemented": True,
},
{"name": "__cpp_lib_string_view",
"values": {
"c++17": 201606L,
},
"headers": ["string", "string_view"],
},
{"name": "__cpp_lib_allocator_traits_is_always_equal",
"values": {
"c++17": 201411L,
},
"headers": ["memory", "scoped_allocator", "string", "deque", "forward_list", "list", "vector", "map", "set", "unordered_map", "unordered_set"],
},
{"name": "__cpp_lib_incomplete_container_elements",
"values": {
"c++17": 201505L,
},
"headers": ["forward_list", "list", "vector"],
},
{"name": "__cpp_lib_map_try_emplace",
"values": {
"c++17": 201411L,
},
"headers": ["map"],
},
{"name": "__cpp_lib_unordered_map_try_emplace",
"values": {
"c++17": 201411L,
},
"headers": ["unordered_map"],
},
{"name": "__cpp_lib_array_constexpr",
"values": {
"c++17": 201603L,
},
"headers": ["iterator", "array"],
},
{"name": "__cpp_lib_nonmember_container_access",
"values": {
"c++17": 201411L,
},
"headers": ["iterator", "array", "deque", "forward_list", "list", "map", "regex",
"set", "string", "unordered_map", "unordered_set", "vector"],
},
{"name": "__cpp_lib_sample",
"values": {
"c++17": 201603L,
},
"headers": ["algorithm"],
},
{"name": "__cpp_lib_clamp",
"values": {
"c++17": 201603L,
},
"headers": ["algorithm"],
},
{"name": "__cpp_lib_gcd_lcm",
"values": {
"c++17": 201606L,
},
"headers": ["numeric"],
},
{"name": "__cpp_lib_hypot",
"values": {
"c++17": 201603L,
},
"headers": ["cmath"],
},
{"name": "__cpp_lib_math_special_functions",
"values": {
"c++17": 201603L,
},
"headers": ["cmath"],
"unimplemented": True,
},
{"name": "__cpp_lib_shared_mutex",
"values": {
"c++17": 201505L,
},
"headers": ["shared_mutex"],
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
{"name": "__cpp_lib_scoped_lock",
"values": {
"c++17": 201703L,
},
"headers": ["mutex"],
},
# C++2a
{"name": "__cpp_lib_char8_t",
"values": {
"c++2a": 201811L,
},
"headers": ["atomic", "filesystem", "istream", "limits", "locale", "ostream",
"string", "string_view"],
"depends": "defined(__cpp_char8_t)",
"internal_depends": "!defined(_LIBCUDACXX_NO_HAS_CHAR8_T)",
},
{"name": "__cpp_lib_erase_if",
"values": {
"c++2a": 201811L,
},
"headers": ["string", "deque", "forward_list", "list", "vector", "map",
"set", "unordered_map", "unordered_set"]
},
{"name": "__cpp_lib_destroying_delete",
"values": {
"c++2a": 201806L,
},
"headers": ["new"],
"depends":
"TEST_STD_VER > 17"
" && defined(__cpp_impl_destroying_delete)"
" && __cpp_impl_destroying_delete >= 201806L",
"internal_depends":
"_LIBCUDACXX_STD_VER > 17"
" && defined(__cpp_impl_destroying_delete)"
" && __cpp_impl_destroying_delete >= 201806L",
},
{"name": "__cpp_lib_three_way_comparison",
"values": {
"c++2a": 201711L,
},
"headers": ["compare"],
"unimplemented": True,
},
{"name": "__cpp_lib_concepts",
"values": {
"c++14": 202002L,
},
"headers": ["concepts"],
},
{"name": "__cpp_lib_constexpr_swap_algorithms",
"values": {
"c++2a": 201806L,
},
"headers": ["algorithm"],
"unimplemented": True,
},
{"name": "__cpp_lib_constexpr_misc",
"values": {
"c++2a": 201811L,
},
"headers": ["array", "functional", "iterator", "string_view", "tuple", "utility"],
"unimplemented": True,
},
{"name": "__cpp_lib_bind_front",
"values": {
"c++17": 201907L,
},
"headers": ["functional"],
},
{"name": "__cpp_lib_is_constant_evaluated",
"values": {
"c++2a": 201811L,
},
"headers": ["type_traits"],
"depends": "TEST_HAS_BUILTIN(__builtin_is_constant_evaluated) || TEST_GCC_VER >= 900",
"internal_depends": "defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED)",
},
{"name": "__cpp_lib_list_remove_return_type",
"values": {
"c++2a": 201806L,
},
"headers": ["forward_list", "list"],
"unimplemented": True,
},
{"name": "__cpp_lib_generic_unordered_lookup",
"values": {
"c++2a": 201811L,
},
"headers": ["unordered_map", "unordered_set"],
"unimplemented": True,
},
{"name": "__cpp_lib_ranges",
"values": {
"c++2a": 201811L,
},
"headers": ["algorithm", "functional", "iterator", "memory", "ranges"],
"unimplemented": True,
},
{"name": "__cpp_lib_bit_cast",
"values": {
"c++2a": 201806L,
},
"headers": ["bit"],
"unimplemented": True,
},
{"name": "__cpp_lib_atomic_ref",
"values": {
"c++2a": 201806L,
},
"headers": ["atomic"],
"unimplemented": True,
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
{"name": "__cpp_lib_interpolate",
"values": {
"c++2a": 201902L,
},
"headers": ["numeric"],
},
]], key=lambda tc: tc["name"])
def get_std_dialects():
std_dialects = ['c++14', 'c++17', 'c++2a']
return list(std_dialects)
def get_first_std(d):
for s in get_std_dialects():
if s in d.keys():
return s
return None
def get_last_std(d):
rev_dialects = get_std_dialects()
rev_dialects.reverse()
for s in rev_dialects:
if s in d.keys():
return s
return None
def get_std_before(d, std):
std_dialects = get_std_dialects()
candidates = std_dialects[0:std_dialects.index(std)]
candidates.reverse()
for cand in candidates:
if cand in d.keys():
return cand
return None
def get_value_before(d, std):
new_std = get_std_before(d, std)
if new_std is None:
return None
return d[new_std]
def get_for_std(d, std):
# This catches the C++11 case for which there should be no defined feature
# test macros.
std_dialects = get_std_dialects()
if std not in std_dialects:
return None
# Find the value for the newest C++ dialect between C++14 and std
std_list = list(std_dialects[0:std_dialects.index(std)+1])
std_list.reverse()
for s in std_list:
if s in d.keys():
return d[s]
return None
"""
Functions to produce the <version> header
"""
def produce_macros_definition_for_std(std):
result = ""
indent = 56
for tc in feature_test_macros:
if std not in tc["values"]:
continue
inner_indent = 1
if 'depends' in tc.keys():
assert 'internal_depends' in tc.keys()
result += "# if %s\n" % tc["internal_depends"]
inner_indent += 2
if get_value_before(tc["values"], std) is not None:
assert 'depends' not in tc.keys()
result += "# undef %s\n" % tc["name"]
line = "#%sdefine %s" % ((" " * inner_indent), tc["name"])
line += " " * (indent - len(line))
line += "%sL" % tc["values"][std]
if 'unimplemented' in tc.keys():
line = "// " + line
result += line
result += "\n"
if 'depends' in tc.keys():
result += "# endif\n"
return result
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def produce_version_synopsis():
indent = 56
header_indent = 56 + len("20XXYYL ")
result = ""
def indent_to(s, val):
if len(s) >= val:
return s
s += " " * (val - len(s))
return s
line = indent_to("Macro name", indent) + "Value"
line = indent_to(line, header_indent) + "Headers"
result += line + "\n"
for tc in feature_test_macros:
prev_defined_std = get_last_std(tc["values"])
line = "{name: <{indent}}{value}L ".format(name=tc['name'], indent=indent,
value=tc["values"][prev_defined_std])
headers = list(tc["headers"])
headers.remove("version")
for chunk in chunks(headers, 3):
line = indent_to(line, header_indent)
chunk = ['<%s>' % header for header in chunk]
line += ' '.join(chunk)
result += line
result += "\n"
line = ""
while True:
prev_defined_std = get_std_before(tc["values"], prev_defined_std)
if prev_defined_std is None:
break
result += "%s%sL // %s\n" % (indent_to("", indent), tc["values"][prev_defined_std],
prev_defined_std.replace("c++", "C++"))
return result
def produce_version_header():
template="""// -*- C++ -*-
//===--------------------------- version ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCUDACXX_VERSIONH
#define _LIBCUDACXX_VERSIONH
/*
version synopsis
{synopsis}
*/
#include <__config>
#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
#pragma GCC system_header
#endif
#if _LIBCUDACXX_STD_VER > 11
{cxx14_macros}
#endif
#if _LIBCUDACXX_STD_VER > 14
{cxx17_macros}
#endif
#if _LIBCUDACXX_STD_VER > 17
{cxx2a_macros}
#endif
#endif // _LIBCUDACXX_VERSIONH
"""
return template.format(
synopsis=produce_version_synopsis().strip(),
cxx14_macros=produce_macros_definition_for_std('c++14').strip(),
cxx17_macros=produce_macros_definition_for_std('c++17').strip(),
cxx2a_macros=produce_macros_definition_for_std('c++2a').strip())
"""
Functions to produce test files
"""
test_types = {
"undefined": """
# ifdef {name}
# error "{name} should not be defined before {std_first}"
# endif
""",
"depends": """
# if {depends}
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else
# ifdef {name}
# error "{name} should not be defined when {depends} is not defined!"
# endif
# endif
""",
"unimplemented": """
# if !defined(_LIBCUDACXX_VERSION)
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else // _LIBCUDACXX_VERSION
# ifdef {name}
# error "{name} should not be defined because it is unimplemented in libc++!"
# endif
# endif
""",
"defined":"""
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
"""
}
def generate_std_test(test_list, std):
result = ""
for tc in test_list:
val = get_for_std(tc["values"], std)
if val is not None:
val = "%sL" % val
if val is None:
result += test_types["undefined"].format(name=tc["name"], std_first=get_first_std(tc["values"]))
elif 'unimplemented' in tc.keys():
result += test_types["unimplemented"].format(name=tc["name"], value=val, std=std)
elif "depends" in tc.keys():
result += test_types["depends"].format(name=tc["name"], value=val, std=std, depends=tc["depends"])
else:
result += test_types["defined"].format(name=tc["name"], value=val, std=std)
return result
def generate_synopsis(test_list):
max_name_len = max([len(tc["name"]) for tc in test_list])
indent = max_name_len + 8
def mk_line(prefix, suffix):
return "{prefix: <{max_len}}{suffix}\n".format(prefix=prefix, suffix=suffix,
max_len=indent)
result = ""
result += mk_line("/* Constant", "Value")
for tc in test_list:
prefix = " %s" % tc["name"]
for std in [s for s in get_std_dialects() if s in tc["values"].keys()]:
result += mk_line(prefix, "%sL [%s]" % (tc["values"][std], std.replace("c++", "C++")))
prefix = ""
result += "*/"
return result
def is_threading_header_unsafe_to_include(h):
# NOTE: "<mutex>" does not blow up when included without threads.
return h in ['atomic', 'shared_mutex']
def produce_tests():
headers = set([h for tc in feature_test_macros for h in tc["headers"]])
for h in headers:
test_list = [tc for tc in feature_test_macros if h in tc["headers"]]
if not has_header(h):
for tc in test_list:
assert 'unimplemented' in tc.keys()
continue
test_tags = ""
if is_threading_header_unsafe_to_include(h):
test_tags += '\n// UNSUPPORTED: libcpp-has-no-threads\n'
test_body = \
"""//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// WARNING: This test was generated by {script_name}
// and should not be edited manually.
{test_tags}
// <{header}>
// Test the feature test macros defined by <{header}>
{synopsis}
#include <{header}>
#include "test_macros.h"
#if TEST_STD_VER < 14
{cxx11_tests}
#elif TEST_STD_VER == 14
{cxx14_tests}
#elif TEST_STD_VER == 17
{cxx17_tests}
#elif TEST_STD_VER > 17
{cxx2a_tests}
#endif // TEST_STD_VER > 17
int main(int, char**) {{ return 0; }}
""".format(script_name=script_name,
header=h,
test_tags=test_tags,
synopsis=generate_synopsis(test_list),
cxx11_tests=generate_std_test(test_list, 'c++11').strip(),
cxx14_tests=generate_std_test(test_list, 'c++14').strip(),
cxx17_tests=generate_std_test(test_list, 'c++17').strip(),
cxx2a_tests=generate_std_test(test_list, 'c++2a').strip())
test_name = "{header}.version.pass.cpp".format(header=h)
out_path = os.path.join(macro_test_path, test_name)
with open(out_path, 'w') as f:
f.write(test_body)
"""
Produce documentation for the feature test macros
"""
def make_widths(grid):
widths = []
for i in range(0, len(grid[0])):
cell_width = 2 + max(reduce(lambda x,y: x+y, [[len(row[i])] for row in grid], []))
widths += [cell_width]
return widths
def create_table(grid, indent):
indent_str = ' '*indent
col_widths = make_widths(grid)
num_cols = len(grid[0])
result = indent_str + add_divider(col_widths, 2)
header_flag = 2
for row_i in xrange(0, len(grid)):
row = grid[row_i]
result = result + indent_str + ' '.join([pad_cell(row[i], col_widths[i]) for i in range(0, len(row))]) + '\n'
is_cxx_header = row[0].startswith('**')
if row_i == len(grid) - 1:
header_flag = 2
result = result + indent_str + add_divider(col_widths, 1 if is_cxx_header else header_flag)
header_flag = 0
return result
def add_divider(widths, header_flag):
if header_flag == 2:
return ' '.join(['='*w for w in widths]) + '\n'
if header_flag == 1:
return '-'.join(['-'*w for w in widths]) + '\n'
else:
return ' '.join(['-'*w for w in widths]) + '\n'
def pad_cell(s, length, left_align=True):
padding = ((length - len(s)) * ' ')
return s + padding
def get_status_table():
table = [["Macro Name", "Value"]]
for std in get_std_dialects():
table += [["**" + std.replace("c++", "C++ ") + "**", ""]]
for tc in feature_test_macros:
if std not in tc["values"].keys():
continue
value = "``%sL``" % tc["values"][std]
if 'unimplemented' in tc.keys():
value = '*unimplemented*'
table += [["``%s``" % tc["name"], value]]
return table
def produce_docs():
doc_str = """.. _FeatureTestMacroTable:
==========================
Feature Test Macro Support
==========================
.. contents::
:local:
Overview
========
This file documents the feature test macros currently supported by libc++.
.. _feature-status:
Status
======
.. table:: Current Status
:name: feature-status-table
:widths: auto
{status_tables}
""".format(status_tables=create_table(get_status_table(), 4))
table_doc_path = os.path.join(docs_path, 'FeatureTestMacroTable.rst')
with open(table_doc_path, 'w') as f:
f.write(doc_str)
def main():
with tempfile.NamedTemporaryFile(mode='w', prefix='version.', delete=False) as tmp_file:
print("producing new <version> header as %s" % tmp_file.name)
tmp_file.write(produce_version_header())
produce_tests()
produce_docs()
if __name__ == '__main__':
main()
| libcudacxx-main | .upstream-tests/utils/generate_feature_test_macro_components.py |
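To make the layout logic in produce_macros_definition_for_std concrete, here is a standalone sketch of how one macro line of <version> is padded (name and value are copied from the feature_test_macros table above; the snippet is illustrative and not invoked by the script):

name = "__cpp_lib_make_unique"
value = "201304"            # c++14 value from the table
indent = 56                 # column at which the value starts
line = "# define %s" % name
line += " " * (indent - len(line))
line += "%sL" % value
print(line)                 # "# define __cpp_lib_make_unique" padded to column 56, then "201304L"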
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_match - Match all symbols in a list against a list of regexes.
"""
from argparse import ArgumentParser
import sys
from libcudacxx.sym_check import util, match, extract
def main():
parser = ArgumentParser(
description='Match all symbols in a list against a list of regexes.')
parser.add_argument(
'--blacklist', dest='blacklist',
type=str, action='store', default=None)
parser.add_argument(
'symbol_list', metavar='symbol_list', type=str,
help='The file containing the old symbol list')
parser.add_argument(
'regexes', metavar='regexes', default=[], nargs='*',
help='The regexes to match symbol names against')
args = parser.parse_args()
if not args.regexes and args.blacklist is None:
sys.stderr.write('Either a regex or a blacklist must be specified.\n')
sys.exit(1)
if args.blacklist:
search_list = util.read_blacklist(args.blacklist)
else:
search_list = args.regexes
symbol_list = util.extract_or_load(args.symbol_list)
matching_count, report = match.find_and_report_matching(
symbol_list, search_list)
sys.stdout.write(report)
if matching_count != 0:
print('%d matching symbols found...' % matching_count)
if __name__ == '__main__':
main()
| libcudacxx-main | .upstream-tests/utils/sym_match.py |
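The heavy lifting is done by libcudacxx.sym_check.match.find_and_report_matching; a rough, self-contained sketch of the kind of matching the tool performs (not the actual implementation) might look like:

import re

def count_matching(symbol_names, regexes):
    # Count symbols whose name matches at least one of the given regexes.
    patterns = [re.compile(r) for r in regexes]
    return sum(1 for name in symbol_names
               if any(p.search(name) for p in patterns))

print(count_matching(["_ZNSt3__19to_stringEi", "malloc"], [r"^_ZNSt3__1"]))  # 1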
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from argparse import ArgumentParser
from ctypes.util import find_library
import distutils.spawn
import glob
import tempfile
import os
import shutil
import subprocess
import signal
import sys
temp_directory_root = None
def exit_with_cleanups(status):
if temp_directory_root is not None:
shutil.rmtree(temp_directory_root)
sys.exit(status)
def print_and_exit(msg):
sys.stderr.write(msg + '\n')
exit_with_cleanups(1)
def find_and_diagnose_missing(lib, search_paths):
if os.path.exists(lib):
return os.path.abspath(lib)
if not lib.startswith('lib') or not lib.endswith('.a'):
print_and_exit(("input file '%s' not not name a static library. "
"It should start with 'lib' and end with '.a") % lib)
for sp in search_paths:
assert type(sp) is list and len(sp) == 1
path = os.path.join(sp[0], lib)
if os.path.exists(path):
return os.path.abspath(path)
print_and_exit("input '%s' does not exist" % lib)
def execute_command(cmd, cwd=None):
"""
Execute a command, capture and return its output.
"""
kwargs = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': cwd,
'universal_newlines': True
}
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
exitCode = p.wait()
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
def execute_command_verbose(cmd, cwd=None, verbose=False):
"""
Execute a command and print its output on failure or when verbose is set.
"""
out, err, exitCode = execute_command(cmd, cwd=cwd)
if exitCode != 0 or verbose:
report = "Command: %s\n" % ' '.join(["'%s'" % a for a in cmd])
if exitCode != 0:
report += "Exit Code: %d\n" % exitCode
if out:
report += "Standard Output:\n--\n%s--" % out
if err:
report += "Standard Error:\n--\n%s--" % err
if exitCode != 0:
report += "\n\nFailed!"
sys.stderr.write('%s\n' % report)
if exitCode != 0:
exit_with_cleanups(exitCode)
return out
def main():
parser = ArgumentParser(
description="Merge multiple archives into a single library")
parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true', default=False)
parser.add_argument(
'-o', '--output', dest='output', required=True,
help='The output archive file',
type=str, action='store')
parser.add_argument(
'-L', dest='search_paths',
help='Paths to search for the libraries along', action='append',
nargs=1)
parser.add_argument(
'--ar', dest='ar_exe', required=False,
help='The ar executable to use, finds \'ar\' in the path if not given',
type=str, action='store')
parser.add_argument(
'--use-libtool', dest='use_libtool', action='store_true', default=False)
parser.add_argument(
'--libtool', dest='libtool_exe', required=False,
help='The libtool executable to use, finds \'libtool\' in the path if not given',
type=str, action='store')
parser.add_argument(
'archives', metavar='archives', nargs='+',
help='The archives to merge')
args = parser.parse_args()
ar_exe = args.ar_exe
if not ar_exe:
ar_exe = distutils.spawn.find_executable('ar')
if not ar_exe:
print_and_exit("failed to find 'ar' executable")
if args.use_libtool:
libtool_exe = args.libtool_exe
if not libtool_exe:
libtool_exe = distutils.spawn.find_executable('libtool')
if not libtool_exe:
print_and_exit("failed to find 'libtool' executable")
if len(args.archives) < 2:
print_and_exit('fewer than 2 inputs provided')
archives = [find_and_diagnose_missing(ar, args.search_paths)
for ar in args.archives]
print ('Merging archives: %s' % archives)
if not os.path.exists(os.path.dirname(args.output)):
print_and_exit("output path doesn't exist: '%s'" % args.output)
global temp_directory_root
temp_directory_root = tempfile.mkdtemp('.libcxx.merge.archives')
files = []
for arc in archives:
execute_command_verbose([ar_exe, 'x', arc],
cwd=temp_directory_root, verbose=args.verbose)
out = execute_command_verbose([ar_exe, 't', arc])
files.extend(out.splitlines())
if args.use_libtool:
files = [f for f in files if not f.startswith('__.SYMDEF')]
execute_command_verbose([libtool_exe, '-static', '-o', args.output] + files,
cwd=temp_directory_root, verbose=args.verbose)
else:
execute_command_verbose([ar_exe, 'rcs', args.output] + files,
cwd=temp_directory_root, verbose=args.verbose)
if __name__ == '__main__':
main()
exit_with_cleanups(0)
| libcudacxx-main | .upstream-tests/utils/merge_archives.py |
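The merge itself is an extract-then-repack with ar; a condensed sketch of that flow, assuming 'ar' is on PATH and the input archives exist (illustrative only, without the script's error handling or libtool path):

import os
import subprocess
import tempfile

def merge_with_ar(output, archives):
    workdir = tempfile.mkdtemp('.merge.example')
    members = []
    for arc in archives:
        arc = os.path.abspath(arc)
        subprocess.check_call(['ar', 'x', arc], cwd=workdir)   # extract members
        listing = subprocess.check_output(['ar', 't', arc], universal_newlines=True)
        members.extend(listing.splitlines())                   # remember member names
    subprocess.check_call(['ar', 'rcs', os.path.abspath(output)] + members, cwd=workdir)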
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""not.py is a utility for inverting the return code of commands.
It acts similarly to llvm/utils/not.
ex: python /path/to/not.py echo hello
echo $? // (prints 1)
"""
import subprocess
import sys
def which_cannot_find_program(prog):
# Allow for import errors on distutils.spawn
try:
import distutils.spawn
exe = distutils.spawn.find_executable(prog[0])
if exe is None:
sys.stderr.write('Failed to find program %s\n' % prog[0])
return True
return False
except:
return False
def main():
argv = list(sys.argv)
del argv[0]
if len(argv) > 0 and argv[0] == '--crash':
del argv[0]
expectCrash = True
else:
expectCrash = False
if len(argv) == 0:
return 1
if which_cannot_find_program(argv[0]):
return 1
rc = subprocess.call(argv)
if rc < 0:
return 0 if expectCrash else 1
if expectCrash:
return 1
return rc == 0
if __name__ == '__main__':
exit(main())
| libcudacxx-main | .upstream-tests/utils/not.py |
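The inversion rule itself is small enough to restate; a hypothetical invert helper (not in the script) capturing the non --crash path:

def invert(rc):
    # rc is the wrapped command's exit status as returned by subprocess.call.
    if rc < 0:               # killed by a signal: still a failure for plain `not`
        return 1
    return 1 if rc == 0 else 0

assert invert(0) == 1        # command succeeded -> not.py fails
assert invert(2) == 0        # command failed    -> not.py succeeds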
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""GDB pretty-printers for libc++.
These should work for objects compiled when _LIBCUDACXX_ABI_UNSTABLE is defined
and when it is undefined.
"""
from __future__ import print_function
import re
import gdb
# One under-documented feature of the gdb pretty-printer API
# is that clients can call any other member of the API
# before they call to_string.
# Therefore all self.FIELDs must be set in the pretty-printer's
# __init__ function.
_void_pointer_type = gdb.lookup_type("void").pointer()
_long_int_type = gdb.lookup_type("unsigned long long")
_libcpp_big_endian = False
def addr_as_long(addr):
return int(addr.cast(_long_int_type))
# The size of a pointer in bytes.
_pointer_size = _void_pointer_type.sizeof
def _remove_cxx_namespace(typename):
"""Removed libc++ specific namespace from the type.
Arguments:
typename(string): A type, such as std::__u::something.
Returns:
A string without the libc++ specific part, such as std::something.
"""
return re.sub("std::__.*?::", "std::", typename)
def _remove_generics(typename):
"""Remove generics part of the type. Assumes typename is not empty.
Arguments:
typename(string): A type such as std::my_collection<element>.
Returns:
The prefix up to the generic part, such as std::my_collection.
"""
match = re.match("^([^<]+)", typename)
return match.group(1)
# Some common substitutions on the types to reduce visual clutter (A user who
# wants to see the actual details can always use print/r).
_common_substitutions = [
("std::basic_string<char, std::char_traits<char>, std::allocator<char> >",
"std::string"),
]
def _prettify_typename(gdb_type):
"""Returns a pretty name for the type, or None if no name can be found.
Arguments:
gdb_type(gdb.Type): A type object.
Returns:
A string, without type_defs, libc++ namespaces, and common substitutions
applied.
"""
type_without_typedefs = gdb_type.strip_typedefs()
typename = type_without_typedefs.name or type_without_typedefs.tag or \
str(type_without_typedefs)
result = _remove_cxx_namespace(typename)
for find_str, subst_str in _common_substitutions:
result = re.sub(find_str, subst_str, result)
return result
def _typename_for_nth_generic_argument(gdb_type, n):
"""Returns a pretty string for the nth argument of the given type.
Arguments:
gdb_type(gdb.Type): A type object, such as the one for std::map<int, int>
n: The (zero indexed) index of the argument to return.
Returns:
A string for the nth argument, such a "std::string"
"""
element_type = gdb_type.template_argument(n)
return _prettify_typename(element_type)
def _typename_with_n_generic_arguments(gdb_type, n):
"""Return a string for the type with the first n (1, ...) generic args."""
base_type = _remove_generics(_prettify_typename(gdb_type))
arg_list = [base_type]
template = "%s<"
for i in range(n):
arg_list.append(_typename_for_nth_generic_argument(gdb_type, i))
template += "%s, "
result = (template[:-2] + ">") % tuple(arg_list)
return result
def _typename_with_first_generic_argument(gdb_type):
return _typename_with_n_generic_arguments(gdb_type, 1)
class StdTuplePrinter(object):
"""Print a std::tuple."""
class _Children(object):
"""Class to iterate over the tuple's children."""
def __init__(self, val):
self.val = val
self.child_iter = iter(self.val["__base_"].type.fields())
self.count = 0
def __iter__(self):
return self
def next(self):
# child_iter raises StopIteration when appropriate.
field_name = self.child_iter.next()
child = self.val["__base_"][field_name]["__value_"]
self.count += 1
return ("[%d]" % self.count, child)
def __init__(self, val):
self.val = val
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if not self.val.type.fields():
return "empty %s" % typename
return "%s containing" % typename
def children(self):
if not self.val.type.fields():
return iter(())
return self._Children(self.val)
def _get_base_subobject(child_class_value, index=0):
"""Returns the object's value in the form of the parent class at index.
This function effectively casts the child_class_value to the base_class's
type, but the type-to-cast to is stored in the field at index, and once
we know the field, we can just return the data.
Args:
child_class_value: the value to cast
index: the parent class index
Raises:
Exception: field at index was not a base-class field.
"""
field = child_class_value.type.fields()[index]
if not field.is_base_class:
raise Exception("Not a base-class field.")
return child_class_value[field]
def _value_of_pair_first(value):
"""Convenience for _get_base_subobject, for the common case."""
return _get_base_subobject(value, 0)["__value_"]
class StdStringPrinter(object):
"""Print a std::string."""
def _get_short_size(self, short_field, short_size):
"""Short size depends on both endianness and a compile-time define."""
# If the padding field is present after all this indirection, then string
# was compiled with _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT defined.
field = short_field.type.fields()[1].type.fields()[0]
libcpp_abi_alternate_string_layout = field.name and "__padding" in field.name
# This logical structure closely follows the original code (which is clearer
# in C++). Keep them parallel to make them easier to compare.
if libcpp_abi_alternate_string_layout:
if _libcpp_big_endian:
return short_size >> 1
else:
return short_size
elif _libcpp_big_endian:
return short_size
else:
return short_size >> 1
def __init__(self, val):
self.val = val
def to_string(self):
"""Build a python string from the data whether stored inline or separately."""
value_field = _value_of_pair_first(self.val["__r_"])
short_field = value_field["__s"]
short_size = short_field["__size_"]
if short_size == 0:
return ""
short_mask = self.val["__short_mask"]
# Counterintuitive to compare the size and short_mask to see if the string
# is long, but that's the way the implementation does it. Note that
# __is_long() doesn't use get_short_size in C++.
is_long = short_size & short_mask
if is_long:
long_field = value_field["__l"]
data = long_field["__data_"]
size = long_field["__size_"]
else:
data = short_field["__data_"]
size = self._get_short_size(short_field, short_size)
if hasattr(data, "lazy_string"):
return data.lazy_string(length=size)
return data.string(length=size)
def display_hint(self):
return "string"
class StdUniquePtrPrinter(object):
"""Print a std::unique_ptr."""
def __init__(self, val):
self.val = val
self.addr = _value_of_pair_first(self.val["__ptr_"])
self.pointee_type = self.val.type.template_argument(0)
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if not self.addr:
return "%s is nullptr" % typename
return ("%s<%s> containing" %
(typename,
_remove_generics(_prettify_typename(self.pointee_type))))
def __iter__(self):
if self.addr:
yield "__ptr_", self.addr.cast(self.pointee_type.pointer())
def children(self):
return self
class StdSharedPointerPrinter(object):
"""Print a std::shared_ptr."""
def __init__(self, val):
self.val = val
self.addr = self.val["__ptr_"]
def to_string(self):
"""Returns self as a string."""
typename = _remove_generics(_prettify_typename(self.val.type))
pointee_type = _remove_generics(
_prettify_typename(self.val.type.template_argument(0)))
if not self.addr:
return "%s is nullptr" % typename
refcount = self.val["__cntrl_"]
if refcount != 0:
usecount = refcount["__shared_owners_"] + 1
weakcount = refcount["__shared_weak_owners_"]
if usecount == 0:
state = "expired, weak %d" % weakcount
else:
state = "count %d, weak %d" % (usecount, weakcount)
return "%s<%s> %s containing" % (typename, pointee_type, state)
def __iter__(self):
if self.addr:
yield "__ptr_", self.addr
def children(self):
return self
class StdVectorPrinter(object):
"""Print a std::vector."""
class _VectorBoolIterator(object):
"""Class to iterate over the bool vector's children."""
def __init__(self, begin, size, bits_per_word):
self.item = begin
self.size = size
self.bits_per_word = bits_per_word
self.count = 0
self.offset = 0
def __iter__(self):
return self
def next(self):
"""Retrieve the next element."""
self.count += 1
if self.count > self.size:
raise StopIteration
entry = self.item.dereference()
if entry & (1 << self.offset):
outbit = 1
else:
outbit = 0
self.offset += 1
if self.offset >= self.bits_per_word:
self.item += 1
self.offset = 0
return ("[%d]" % self.count, outbit)
class _VectorIterator(object):
"""Class to iterate over the non-bool vector's children."""
def __init__(self, begin, end):
self.item = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
self.count += 1
if self.item == self.end:
raise StopIteration
entry = self.item.dereference()
self.item += 1
return ("[%d]" % self.count, entry)
def __init__(self, val):
"""Set val, length, capacity, and iterator for bool and normal vectors."""
self.val = val
self.typename = _remove_generics(_prettify_typename(val.type))
begin = self.val["__begin_"]
if self.val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL:
self.typename += "<bool>"
self.length = self.val["__size_"]
bits_per_word = self.val["__bits_per_word"]
self.capacity = _value_of_pair_first(
self.val["__cap_alloc_"]) * bits_per_word
self.iterator = self._VectorBoolIterator(
begin, self.length, bits_per_word)
else:
end = self.val["__end_"]
self.length = end - begin
self.capacity = _get_base_subobject(
self.val["__end_cap_"])["__value_"] - begin
self.iterator = self._VectorIterator(begin, end)
def to_string(self):
return ("%s of length %d, capacity %d" %
(self.typename, self.length, self.capacity))
def children(self):
return self.iterator
def display_hint(self):
return "array"
class StdBitsetPrinter(object):
"""Print a std::bitset."""
def __init__(self, val):
self.val = val
self.n_words = int(self.val["__n_words"])
self.bits_per_word = int(self.val["__bits_per_word"])
if self.n_words == 1:
self.values = [int(self.val["__first_"])]
else:
self.values = [int(self.val["__first_"][index])
for index in range(self.n_words)]
def to_string(self):
typename = _prettify_typename(self.val.type)
return "%s" % typename
def _byte_it(self, value):
index = -1
while value:
index += 1
will_yield = value % 2
value /= 2
if will_yield:
yield index
def _list_it(self):
for word_index in range(self.n_words):
current = self.values[word_index]
if current:
for n in self._byte_it(current):
yield ("[%d]" % (word_index * self.bits_per_word + n), 1)
def __iter__(self):
return self._list_it()
def children(self):
return self
class StdDequePrinter(object):
"""Print a std::deque."""
def __init__(self, val):
self.val = val
self.size = int(_value_of_pair_first(val["__size_"]))
self.start_ptr = self.val["__map_"]["__begin_"]
self.first_block_start_index = int(self.val["__start_"])
self.node_type = self.start_ptr.type
self.block_size = self._calculate_block_size(
val.type.template_argument(0))
def _calculate_block_size(self, element_type):
"""Calculates the number of elements in a full block."""
size = element_type.sizeof
# Copied from struct __deque_block_size implementation of libcxx.
return 4096 / size if size < 256 else 16
def _bucket_it(self, start_addr, start_index, end_index):
for i in range(start_index, end_index):
yield i, (start_addr.dereference() + i).dereference()
def _list_it(self):
"""Primary iteration worker."""
num_emitted = 0
current_addr = self.start_ptr
start_index = self.first_block_start_index
while num_emitted < self.size:
end_index = min(start_index + self.size -
num_emitted, self.block_size)
for _, elem in self._bucket_it(current_addr, start_index, end_index):
yield "", elem
num_emitted += end_index - start_index
current_addr = gdb.Value(addr_as_long(current_addr) + _pointer_size) \
.cast(self.node_type)
start_index = 0
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
def __iter__(self):
return self._list_it()
def children(self):
return self
def display_hint(self):
return "array"
class StdListPrinter(object):
"""Print a std::list."""
def __init__(self, val):
self.val = val
size_alloc_field = self.val["__size_alloc_"]
self.size = int(_value_of_pair_first(size_alloc_field))
dummy_node = self.val["__end_"]
self.nodetype = gdb.lookup_type(
re.sub("__list_node_base", "__list_node",
str(dummy_node.type.strip_typedefs()))).pointer()
self.first_node = dummy_node["__next_"]
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
def _list_iter(self):
current_node = self.first_node
for _ in range(self.size):
yield "", current_node.cast(self.nodetype).dereference()["__value_"]
current_node = current_node.dereference()["__next_"]
def __iter__(self):
return self._list_iter()
def children(self):
return self if self.nodetype else iter(())
def display_hint(self):
return "array"
class StdQueueOrStackPrinter(object):
"""Print a std::queue or std::stack."""
def __init__(self, val):
self.val = val
self.underlying = val["c"]
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
return "%s wrapping" % typename
def children(self):
return iter([("", self.underlying)])
def display_hint(self):
return "array"
class StdPriorityQueuePrinter(object):
"""Print a std::priority_queue."""
def __init__(self, val):
self.val = val
self.underlying = val["c"]
def to_string(self):
# TODO(tamur): It would be nice to print the top element. The technical
# difficulty is that, the implementation refers to the underlying
# container, which is a generic class. libstdcxx pretty printers do not
# print the top element.
typename = _remove_generics(_prettify_typename(self.val.type))
return "%s wrapping" % typename
def children(self):
return iter([("", self.underlying)])
def display_hint(self):
return "array"
class RBTreeUtils(object):
"""Utility class for std::(multi)map, and std::(multi)set and iterators."""
def __init__(self, cast_type, root):
self.cast_type = cast_type
self.root = root
def left_child(self, node):
result = node.cast(self.cast_type).dereference()["__left_"]
return result
def right_child(self, node):
result = node.cast(self.cast_type).dereference()["__right_"]
return result
def parent(self, node):
"""Return the parent of node, if it exists."""
# If this is the root, then from the algorithm's point of view, it has no
# parent.
if node == self.root:
return None
# We don't have enough information to tell if this is the end_node (which
# doesn't have a __parent_ field), or the root (which doesn't have a parent
# from the algorithm's point of view), so cast_type may not be correct for
# this particular node. Use heuristics.
# The end_node's left child is the root. Note that when printing iterators
# in isolation, the root is unknown.
if self.left_child(node) == self.root:
return None
parent = node.cast(self.cast_type).dereference()["__parent_"]
# If the value at the offset of __parent_ doesn't look like a valid pointer,
# then assume that node is the end_node (and therefore has no parent).
# End_node type has a pointer embedded, so should have pointer alignment.
if addr_as_long(parent) % _void_pointer_type.alignof:
return None
# This is ugly, but the only other option is to dereference an invalid
# pointer. 0x8000 is fairly arbitrary, but has had good results in
# practice. If there was a way to tell if a pointer is invalid without
# actually dereferencing it and spewing error messages, that would be ideal.
if parent < 0x8000:
return None
return parent
def is_left_child(self, node):
parent = self.parent(node)
return parent is not None and self.left_child(parent) == node
def is_right_child(self, node):
parent = self.parent(node)
return parent is not None and self.right_child(parent) == node
class AbstractRBTreePrinter(object):
"""Abstract super class for std::(multi)map, and std::(multi)set."""
def __init__(self, val):
self.val = val
tree = self.val["__tree_"]
self.size = int(_value_of_pair_first(tree["__pair3_"]))
dummy_root = tree["__pair1_"]
root = _value_of_pair_first(dummy_root)["__left_"]
cast_type = self._init_cast_type(val.type)
self.util = RBTreeUtils(cast_type, root)
def _get_key_value(self, node):
"""Subclasses should override to return a list of values to yield."""
raise NotImplementedError
def _traverse(self):
"""Traverses the binary search tree in order."""
current = self.util.root
skip_left_child = False
while True:
if not skip_left_child and self.util.left_child(current):
current = self.util.left_child(current)
continue
skip_left_child = False
for key_value in self._get_key_value(current):
yield "", key_value
right_child = self.util.right_child(current)
if right_child:
current = right_child
continue
while self.util.is_right_child(current):
current = self.util.parent(current)
if self.util.is_left_child(current):
current = self.util.parent(current)
skip_left_child = True
continue
break
def __iter__(self):
return self._traverse()
def children(self):
return self if self.util.cast_type and self.size > 0 else iter(())
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
class StdMapPrinter(AbstractRBTreePrinter):
"""Print a std::map or std::multimap."""
def _init_cast_type(self, val_type):
map_it_type = gdb.lookup_type(
str(val_type) + "::iterator").strip_typedefs()
tree_it_type = map_it_type.template_argument(0)
node_ptr_type = tree_it_type.template_argument(1)
return node_ptr_type
def display_hint(self):
return "map"
def _get_key_value(self, node):
key_value = node.cast(self.util.cast_type).dereference()[
"__value_"]["__cc"]
return [key_value["first"], key_value["second"]]
class StdSetPrinter(AbstractRBTreePrinter):
"""Print a std::set."""
def _init_cast_type(self, val_type):
set_it_type = gdb.lookup_type(
str(val_type) + "::iterator").strip_typedefs()
node_ptr_type = set_it_type.template_argument(1)
return node_ptr_type
def display_hint(self):
return "array"
def _get_key_value(self, node):
key_value = node.cast(self.util.cast_type).dereference()["__value_"]
return [key_value]
class AbstractRBTreeIteratorPrinter(object):
"""Abstract super class for std::(multi)map, and std::(multi)set iterator."""
def _initialize(self, val, typename):
self.typename = typename
self.val = val
self.addr = self.val["__ptr_"]
cast_type = self.val.type.template_argument(1)
self.util = RBTreeUtils(cast_type, None)
if self.addr:
self.node = self.addr.cast(cast_type).dereference()
def _is_valid_node(self):
if not self.util.parent(self.addr):
return False
return self.util.is_left_child(self.addr) or \
self.util.is_right_child(self.addr)
def to_string(self):
if not self.addr:
return "%s is nullptr" % self.typename
return "%s " % self.typename
def _get_node_value(self, node):
raise NotImplementedError
def __iter__(self):
addr_str = "[%s]" % str(self.addr)
if not self._is_valid_node():
yield addr_str, " end()"
else:
yield addr_str, self._get_node_value(self.node)
def children(self):
return self if self.addr else iter(())
class MapIteratorPrinter(AbstractRBTreeIteratorPrinter):
"""Print a std::(multi)map iterator."""
def __init__(self, val):
self._initialize(val["__i_"],
_remove_generics(_prettify_typename(val.type)))
def _get_node_value(self, node):
return node["__value_"]["__cc"]
class SetIteratorPrinter(AbstractRBTreeIteratorPrinter):
"""Print a std::(multi)set iterator."""
def __init__(self, val):
self._initialize(val, _remove_generics(_prettify_typename(val.type)))
def _get_node_value(self, node):
return node["__value_"]
class StdFposPrinter(object):
"""Print a std::fpos or std::streampos."""
def __init__(self, val):
self.val = val
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
offset = self.val["__off_"]
state = self.val["__st_"]
count = state["__count"]
value = state["__value"]["__wch"]
return "%s with stream offset:%s with state: {count:%s value:%s}" % (
typename, offset, count, value)
class AbstractUnorderedCollectionPrinter(object):
"""Abstract super class for std::unordered_(multi)[set|map]."""
def __init__(self, val):
self.val = val
self.table = val["__table_"]
self.sentinel = self.table["__p1_"]
self.size = int(_value_of_pair_first(self.table["__p2_"]))
node_base_type = self.sentinel.type.template_argument(0)
self.cast_type = node_base_type.template_argument(0)
def _list_it(self, sentinel_ptr):
next_ptr = _value_of_pair_first(sentinel_ptr)["__next_"]
while str(next_ptr.cast(_void_pointer_type)) != "0x0":
next_val = next_ptr.cast(self.cast_type).dereference()
for key_value in self._get_key_value(next_val):
yield "", key_value
next_ptr = next_val["__next_"]
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
def _get_key_value(self, node):
"""Subclasses should override to return a list of values to yield."""
raise NotImplementedError
def children(self):
return self if self.cast_type and self.size > 0 else iter(())
def __iter__(self):
return self._list_it(self.sentinel)
class StdUnorderedSetPrinter(AbstractUnorderedCollectionPrinter):
"""Print a std::unordered_(multi)set."""
def _get_key_value(self, node):
return [node["__value_"]]
def display_hint(self):
return "array"
class StdUnorderedMapPrinter(AbstractUnorderedCollectionPrinter):
"""Print a std::unordered_(multi)map."""
def _get_key_value(self, node):
key_value = node["__value_"]["__cc"]
return [key_value["first"], key_value["second"]]
def display_hint(self):
return "map"
class AbstractHashMapIteratorPrinter(object):
"""Abstract class for unordered collection iterators."""
def _initialize(self, val, addr):
self.val = val
self.typename = _remove_generics(_prettify_typename(self.val.type))
self.addr = addr
if self.addr:
self.node = self.addr.cast(self.cast_type).dereference()
def _get_key_value(self):
"""Subclasses should override to return a list of values to yield."""
raise NotImplementedError
def to_string(self):
if not self.addr:
return "%s = end()" % self.typename
return "%s " % self.typename
def children(self):
return self if self.addr else iter(())
def __iter__(self):
for key_value in self._get_key_value():
yield "", key_value
class StdUnorderedSetIteratorPrinter(AbstractHashMapIteratorPrinter):
"""Print a std::(multi)set iterator."""
def __init__(self, val):
self.cast_type = val.type.template_argument(0)
self._initialize(val, val["__node_"])
def _get_key_value(self):
return [self.node["__value_"]]
def display_hint(self):
return "array"
class StdUnorderedMapIteratorPrinter(AbstractHashMapIteratorPrinter):
"""Print a std::(multi)map iterator."""
def __init__(self, val):
self.cast_type = val.type.template_argument(0).template_argument(0)
self._initialize(val, val["__i_"]["__node_"])
def _get_key_value(self):
key_value = self.node["__value_"]["__cc"]
return [key_value["first"], key_value["second"]]
def display_hint(self):
return "map"
def _remove_std_prefix(typename):
match = re.match("^std::(.+)", typename)
return match.group(1) if match is not None else ""
class LibcxxPrettyPrinter(object):
"""PrettyPrinter object so gdb-commands like 'info pretty-printers' work."""
def __init__(self, name):
super(LibcxxPrettyPrinter, self).__init__()
self.name = name
self.enabled = True
self.lookup = {
"basic_string": StdStringPrinter,
"string": StdStringPrinter,
"tuple": StdTuplePrinter,
"unique_ptr": StdUniquePtrPrinter,
"shared_ptr": StdSharedPointerPrinter,
"weak_ptr": StdSharedPointerPrinter,
"bitset": StdBitsetPrinter,
"deque": StdDequePrinter,
"list": StdListPrinter,
"queue": StdQueueOrStackPrinter,
"stack": StdQueueOrStackPrinter,
"priority_queue": StdPriorityQueuePrinter,
"map": StdMapPrinter,
"multimap": StdMapPrinter,
"set": StdSetPrinter,
"multiset": StdSetPrinter,
"vector": StdVectorPrinter,
"__map_iterator": MapIteratorPrinter,
"__map_const_iterator": MapIteratorPrinter,
"__tree_iterator": SetIteratorPrinter,
"__tree_const_iterator": SetIteratorPrinter,
"fpos": StdFposPrinter,
"unordered_set": StdUnorderedSetPrinter,
"unordered_multiset": StdUnorderedSetPrinter,
"unordered_map": StdUnorderedMapPrinter,
"unordered_multimap": StdUnorderedMapPrinter,
"__hash_map_iterator": StdUnorderedMapIteratorPrinter,
"__hash_map_const_iterator": StdUnorderedMapIteratorPrinter,
"__hash_iterator": StdUnorderedSetIteratorPrinter,
"__hash_const_iterator": StdUnorderedSetIteratorPrinter,
}
self.subprinters = []
for name, subprinter in self.lookup.items():
# Subprinters and names are used only for the rarely used command "info
# pretty" (and related), so the name of the first data structure it prints
# is a reasonable choice.
if subprinter not in self.subprinters:
subprinter.name = name
self.subprinters.append(subprinter)
def __call__(self, val):
"""Return the pretty printer for a val, if the type is supported."""
# Do not handle any type that is not a struct/class.
if val.type.strip_typedefs().code != gdb.TYPE_CODE_STRUCT:
return None
# Don't attempt types known to be inside libstdcxx.
typename = val.type.name or val.type.tag or str(val.type)
match = re.match("^std::(__.*?)::", typename)
if match is None or match.group(1) in ["__cxx1998",
"__debug",
"__7",
"__g"]:
return None
# Handle any using declarations or other typedefs.
typename = _prettify_typename(val.type)
if not typename:
return None
without_generics = _remove_generics(typename)
lookup_name = _remove_std_prefix(without_generics)
if lookup_name in self.lookup:
return self.lookup[lookup_name](val)
return None
_libcxx_printer_name = "libcxx_pretty_printer"
# These are called for every binary object file, which could be thousands in
# certain pathological cases. Limit our pretty printers to the progspace.
def _register_libcxx_printers(event):
progspace = event.new_objfile.progspace
# It would be ideal to get the endianness at print time, but
# gdb.execute clears gdb's internal wrap buffer, removing any values
# already generated as part of a larger data structure, and there is
    # no Python API to get the endianness. Mixed-endianness debugging is
    # rare enough that this workaround should be adequate.
_libcpp_big_endian = "big endian" in gdb.execute("show endian",
to_string=True)
if not getattr(progspace, _libcxx_printer_name, False):
print("Loading libc++ pretty-printers.")
gdb.printing.register_pretty_printer(
progspace, LibcxxPrettyPrinter(_libcxx_printer_name))
setattr(progspace, _libcxx_printer_name, True)
def _unregister_libcxx_printers(event):
progspace = event.progspace
if getattr(progspace, _libcxx_printer_name, False):
for printer in progspace.pretty_printers:
if getattr(printer, "name", "none") == _libcxx_printer_name:
progspace.pretty_printers.remove(printer)
setattr(progspace, _libcxx_printer_name, False)
break
def register_libcxx_printer_loader():
"""Register event handlers to load libc++ pretty-printers."""
gdb.events.new_objfile.connect(_register_libcxx_printers)
gdb.events.clear_objfiles.connect(_unregister_libcxx_printers)
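# Illustrative usage (the paths are hypothetical): from a .gdbinit or an
# interactive GDB session, the loader above can be wired in roughly like so:
#   (gdb) python import sys; sys.path.insert(0, '/path/to/utils/gdb')
#   (gdb) python from libcxx.printers import register_libcxx_printer_loader
#   (gdb) python register_libcxx_printer_loader()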
| libcudacxx-main | .upstream-tests/utils/gdb/libcxx/printers.py |
#! /usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
'''
Provides an implementation that performs no logging
'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
urls = (
'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
'repository.txt',
'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
'''
    Downloads the mingw-builds repository files and parses them
'''
log.info('getting mingw-builds repository')
versions = {}
re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
re_sub = r'http://downloads.sourceforge.net/project/\1'
for url in urls:
log.debug(' - requesting: %s', url)
socket = request.urlopen(url)
repo = socket.read()
if not isinstance(repo, str):
repo = repo.decode();
socket.close()
for entry in repo.split('\n')[:-1]:
value = entry.split('|')
version = tuple([int(n) for n in value[0].strip().split('.')])
version = versions.setdefault(version, {})
arch = value[1].strip()
if arch == 'x32':
arch = 'i686'
elif arch == 'x64':
arch = 'x86_64'
arch = version.setdefault(arch, {})
threading = arch.setdefault(value[2].strip(), {})
exceptions = threading.setdefault(value[3].strip(), {})
revision = exceptions.setdefault(int(value[4].strip()[3:]),
re_sourceforge.sub(re_sub, value[5].strip()))
return versions
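# Illustrative sketch only (the entry below is hypothetical): repository()
# returns a nested mapping of
#   version tuple -> arch -> threading -> exceptions -> revision -> url
# which root() later walks to pick a download.
def _example_repository_lookup():
    versions = {
        (4, 8, 1): {
            'x86_64': {
                'posix': {
                    'seh': {4: 'http://example.invalid/mingw-builds.7z'},
                },
            },
        },
    }
    return versions[(4, 8, 1)]['x86_64']['posix']['seh'][4]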
def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if type(path) is type(''):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
'''
Attempts to find 7zip for unpacking the mingw-build archives
'''
log.info('finding 7zip')
path = find_in_path('7z')
if not path:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
path, _ = winreg.QueryValueEx(key, 'Path')
path = [os.path.join(path, '7z.exe')]
log.debug('found \'%s\'', path[0])
return path[0]
find_7zip()
def unpack(archive, location, log = EmptyLogger()):
'''
Unpacks a mingw-builds archive
'''
sevenzip = find_7zip(log)
log.info('unpacking %s', os.path.basename(archive))
cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
log.debug(' - %r', cmd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
'''
Downloads and unpacks a mingw-builds archive
'''
log.info('downloading MinGW')
log.debug(' - url: %s', url)
log.debug(' - location: %s', location)
re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
stream = request.urlopen(url)
try:
content = stream.getheader('Content-Disposition') or ''
except AttributeError:
content = stream.headers.getheader('Content-Disposition') or ''
matches = re_content.match(content)
if matches:
filename = matches.group(2)
else:
parsed = parse.urlparse(stream.geturl())
filename = os.path.basename(parsed.path)
try:
os.makedirs(location)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(location):
pass
else:
raise
archive = os.path.join(location, filename)
with open(archive, 'wb') as out:
while True:
buf = stream.read(1024)
if not buf:
break
out.write(buf)
unpack(archive, location, log = log)
os.remove(archive)
possible = os.path.join(location, 'mingw64')
if not os.path.exists(possible):
possible = os.path.join(location, 'mingw32')
if not os.path.exists(possible):
raise ValueError('Failed to find unpacked MinGW: ' + possible)
return possible
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
keys = versions[version][arch].keys()
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
keys = versions[version][arch][threading].keys()
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
if revision == None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
    # Log the details of the toolchain being fetched
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
def str2ver(string):
'''
Converts a version string into a tuple
'''
try:
version = tuple(int(v) for v in string.split('.'))
if len(version) is not 3:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
            'please provide a version string with three components, e.g. "4.8.1"')
return version
def main():
'''
Invoked when the script is run directly by the python interpreter
'''
parser = argparse.ArgumentParser(
description = 'Downloads a specific version of MinGW',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--location',
help = 'the location to download the compiler to',
default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
help = 'the target MinGW architecture string')
parser.add_argument('--version', type = str2ver,
help = 'the version of GCC to download')
parser.add_argument('--threading', choices = ['posix', 'win32'],
help = 'the threading type of the compiler')
parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
help = 'the method to throw exceptions')
parser.add_argument('--revision', type=int,
help = 'the revision of the MinGW release')
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='store_true',
help='increase the script output verbosity')
group.add_argument('-q', '--quiet', action='store_true',
help='only print errors and warning')
args = parser.parse_args()
# Create the logger
logger = logging.getLogger('mingw')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if args.quiet:
logger.setLevel(logging.WARN)
if args.verbose:
logger.setLevel(logging.DEBUG)
# Get MinGW
root_dir = root(location = args.location, arch = args.arch,
version = args.version, threading = args.threading,
exceptions = args.exceptions, revision = args.revision,
log = logger)
sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
if __name__ == '__main__':
try:
main()
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
sys.exit(1)
except OSError as e:
sys.stderr.write('OS error: %s\n' % e)
sys.exit(1)
except KeyboardInterrupt as e:
sys.stderr.write('Killed\n')
sys.exit(1)
| libcudacxx-main | .upstream-tests/utils/google-benchmark/mingw.py |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Werror',
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# The magic -x option specifies the language that the files to be compiled
# are written in. This is mostly relevant for C++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cc' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
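# Illustrative sketch (hypothetical paths, not called by YCM): shows how the
# helper above rewrites relative include flags against a working directory.
def _ExampleMakeFlagsAbsolute():
  # ['-I', 'include'] with cwd '/src/proj' becomes ['-I', '/src/proj/include'];
  # already-absolute arguments such as '/usr/include' are left untouched.
  return MakeRelativePathsInFlagsAbsolute(
      [ '-I', 'include', '-isystem', '/usr/include' ], '/src/proj' )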
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| libcudacxx-main | .upstream-tests/utils/google-benchmark/.ycm_extra_conf.py |
#!/usr/bin/env python
"""
strip_asm.py - Clean up ASM output for the specified file
"""
from argparse import ArgumentParser
import sys
import os
import re
def find_used_labels(asm):
found = set()
label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
for l in asm.splitlines():
m = label_re.match(l)
if m:
found.add('.L%s' % m.group(1))
return found
def normalize_labels(asm):
decls = set()
label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if m:
decls.add(m.group(0))
if len(decls) == 0:
return asm
needs_dot = next(iter(decls))[0] != '.'
if not needs_dot:
return asm
for ld in decls:
asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm)
return asm
def transform_labels(asm):
asm = normalize_labels(asm)
used_decls = find_used_labels(asm)
new_asm = ''
label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if not m or m.group(0) in used_decls:
new_asm += l
new_asm += '\n'
return new_asm
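# Illustrative fragment (hypothetical assembly, not produced by any particular
# compiler) showing the effect of transform_labels(): '.L2' survives because it
# is a jump target, while the unreferenced '.L0' declaration is dropped.
_EXAMPLE_ASM = """\
foo:
.L0:
    jmp .L2
.L2:
    ret
"""
# transform_labels(_EXAMPLE_ASM) would keep 'foo:', 'jmp .L2', '.L2:' and 'ret'.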
def is_identifier(tk):
if len(tk) == 0:
return False
first = tk[0]
if not first.isalpha() and first != '_':
return False
for i in range(1, len(tk)):
c = tk[i]
if not c.isalnum() and c != '_':
return False
return True
def process_identifiers(l):
"""
process_identifiers - process all identifiers and modify them to have
consistent names across all platforms; specifically across ELF and MachO.
    For example, MachO inserts an additional underscore at the beginning of
names. This function removes that.
"""
parts = re.split(r'([a-zA-Z0-9_]+)', l)
new_line = ''
for tk in parts:
if is_identifier(tk):
if tk.startswith('__Z'):
tk = tk[1:]
elif tk.startswith('_') and len(tk) > 1 and \
tk[1].isalpha() and tk[1] != 'Z':
tk = tk[1:]
new_line += tk
return new_line
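# Illustrative examples (hypothetical symbols), not used by the tool itself:
def _example_process_identifiers():
    # Mach-O prepends '_' to plain C symbols and '__Z' to mangled C++ symbols;
    # process_identifiers() reduces both to their ELF-style spellings.
    return [
        process_identifiers("callq _main"),     # expected: "callq main"
        process_identifiers("callq __Z3foov"),  # expected: "callq _Z3foov"
    ]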
def process_asm(asm):
"""
Strip the ASM of unwanted directives and lines
"""
new_contents = ''
asm = transform_labels(asm)
# TODO: Add more things we want to remove
    discard_regexes = [
        re.compile(r"\s+\..*$"), # directive
        re.compile(r"\s*#(NO_APP|APP)$"), # inline ASM
        re.compile(r"\s*#.*$"), # comment line
        re.compile(r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), # global directive
        re.compile(r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
keep_regexes = [
]
fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
for l in asm.splitlines():
# Remove Mach-O attribute
l = l.replace('@GOTPCREL', '')
add_line = True
for reg in discard_regexes:
if reg.match(l) is not None:
add_line = False
break
for reg in keep_regexes:
if reg.match(l) is not None:
add_line = True
break
if add_line:
if fn_label_def.match(l) and len(new_contents) != 0:
new_contents += '\n'
l = process_identifiers(l)
new_contents += l
new_contents += '\n'
return new_contents
def main():
parser = ArgumentParser(
description='generate a stripped assembly file')
parser.add_argument(
'input', metavar='input', type=str, nargs=1,
help='An input assembly file')
parser.add_argument(
'out', metavar='output', type=str, nargs=1,
help='The output file')
args, unknown_args = parser.parse_known_args()
input = args.input[0]
output = args.out[0]
if not os.path.isfile(input):
print(("ERROR: input file '%s' does not exist") % input)
sys.exit(1)
contents = None
with open(input, 'r') as f:
contents = f.read()
new_contents = process_asm(contents)
with open(output, 'w') as f:
f.write(new_contents)
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| libcudacxx-main | .upstream-tests/utils/google-benchmark/tools/strip_asm.py |
#!/usr/bin/env python
"""
compare.py - versatile benchmark output compare tool
"""
import unittest
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
"""
Perform checking on the user provided inputs and diagnose any abnormalities
"""
in1_kind, in1_err = classify_input_file(in1)
in2_kind, in2_err = classify_input_file(in2)
output_file = find_benchmark_flag('--benchmark_out=', flags)
output_type = find_benchmark_flag('--benchmark_out_format=', flags)
if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
print(("WARNING: '--benchmark_out=%s' will be passed to both "
"benchmarks causing it to be overwritten") % output_file)
if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
print("WARNING: passing optional flags has no effect since both "
"inputs are JSON")
if output_type is not None and output_type != 'json':
print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
" is not supported.") % output_type)
sys.exit(1)
def create_parser():
parser = ArgumentParser(
description='versatile benchmark output compare tool')
parser.add_argument(
'-a',
'--display_aggregates_only',
dest='display_aggregates_only',
action="store_true",
help="If there are repetitions, by default, we display everything - the"
" actual runs, and the aggregates computed. Sometimes, it is "
"desirable to only view the aggregates. E.g. when there are a lot "
"of repetitions. Do note that only the display is affected. "
"Internally, all the actual runs are still used, e.g. for U test.")
utest = parser.add_argument_group()
utest.add_argument(
'--no-utest',
dest='utest',
default=True,
action="store_false",
help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
alpha_default = 0.05
utest.add_argument(
"--alpha",
dest='utest_alpha',
default=alpha_default,
type=float,
help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
alpha_default)
subparsers = parser.add_subparsers(
help='This tool has multiple modes of operation:',
dest='mode')
parser_a = subparsers.add_parser(
'benchmarks',
help='The most simple use-case, compare all the output of these two benchmarks')
baseline = parser_a.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
contender = parser_a.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
parser_a.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_b = subparsers.add_parser(
'filters', help='Compare filter one with the filter two of benchmark')
baseline = parser_b.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test',
metavar='test',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_b.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_b.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_c = subparsers.add_parser(
'benchmarksfiltered',
help='Compare filter one of first benchmark with filter two of the second benchmark')
baseline = parser_c.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_c.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='The second benchmark executable or JSON output file, that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_c.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
return parser
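# Illustrative invocations (the file and filter names are hypothetical) for the
# three modes defined by create_parser(); compare.py is normally driven from
# the shell rather than through these lists.
_EXAMPLE_ARGV = [
    # Compare every benchmark of two binaries / JSON dumps.
    ['benchmarks', 'old.json', 'new.json'],
    # Compare two benchmark families measured by the same binary.
    ['filters', './bench', 'BM_Vector', 'BM_Deque'],
    # Compare one family of the first binary against one family of the second.
    ['benchmarksfiltered', './bench_old', 'BM_Copy', './bench_new', 'BM_Copy'],
]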
def main():
# Parse the command line flags
parser = create_parser()
args, unknown_args = parser.parse_known_args()
if args.mode is None:
parser.print_help()
exit(1)
assert not unknown_args
benchmark_options = args.benchmark_options
if args.mode == 'benchmarks':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = ''
filter_contender = ''
# NOTE: if test_baseline == test_contender, you are analyzing the stdev
description = 'Comparing %s to %s' % (test_baseline, test_contender)
elif args.mode == 'filters':
test_baseline = args.test[0].name
test_contender = args.test[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if filter_baseline == filter_contender, you are analyzing the
# stdev
description = 'Comparing %s to %s (from %s)' % (
filter_baseline, filter_contender, args.test[0].name)
elif args.mode == 'benchmarksfiltered':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if test_baseline == test_contender and
# filter_baseline == filter_contender, you are analyzing the stdev
description = 'Comparing %s (from %s) to %s (from %s)' % (
filter_baseline, test_baseline, filter_contender, test_contender)
else:
# should never happen
print("Unrecognized mode of operation: '%s'" % args.mode)
parser.print_help()
exit(1)
check_inputs(test_baseline, test_contender, benchmark_options)
if args.display_aggregates_only:
benchmark_options += ['--benchmark_display_aggregates_only=true']
options_baseline = []
options_contender = []
if filter_baseline and filter_contender:
options_baseline = ['--benchmark_filter=%s' % filter_baseline]
options_contender = ['--benchmark_filter=%s' % filter_contender]
# Run the benchmarks and report the results
json1 = json1_orig = gbench.util.run_or_load_benchmark(
test_baseline, benchmark_options + options_baseline)
json2 = json2_orig = gbench.util.run_or_load_benchmark(
test_contender, benchmark_options + options_contender)
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
json1 = gbench.report.filter_benchmark(
json1_orig, filter_baseline, replacement)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
# Diff and output
output_lines = gbench.report.generate_difference_report(
json1, json2, args.display_aggregates_only,
args.utest, args.utest_alpha)
print(description)
for ln in output_lines:
print(ln)
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'gbench',
'Inputs')
self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
def test_benchmarks_basic(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest(self):
parsed = self.parser.parse_args(
['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.05)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_display_aggregates_only(self):
parsed = self.parser.parse_args(
['-a', 'benchmarks', self.testInput0, self.testInput1])
self.assertTrue(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['d'])
def test_benchmarks_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_basic(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertFalse(parsed.benchmark_options)
def test_filters_with_remainder(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', '--', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['f'])
def test_benchmarksfiltered_basic(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertFalse(parsed.benchmark_options)
def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'f')
def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
# unittest.main()
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| libcudacxx-main | .upstream-tests/utils/google-benchmark/tools/compare.py |
"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import tempfile
import subprocess
import sys
# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
def is_executable_file(filename):
"""
Return 'True' if 'filename' names a valid file which is likely
an executable. A file is considered an executable if it starts with the
    magic bytes for an EXE, Mach-O, or ELF file.
"""
if not os.path.isfile(filename):
return False
with open(filename, mode='rb') as f:
magic_bytes = f.read(_num_magic_bytes)
if sys.platform == 'darwin':
return magic_bytes in [
b'\xfe\xed\xfa\xce', # MH_MAGIC
b'\xce\xfa\xed\xfe', # MH_CIGAM
b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
b'\xca\xfe\xba\xbe', # FAT_MAGIC
b'\xbe\xba\xfe\xca' # FAT_CIGAM
]
elif sys.platform.startswith('win'):
return magic_bytes == b'MZ'
else:
return magic_bytes == b'\x7FELF'
def is_json_file(filename):
"""
Returns 'True' if 'filename' names a valid JSON output file.
'False' otherwise.
"""
try:
with open(filename, 'r') as f:
json.load(f)
return True
except BaseException:
pass
return False
def classify_input_file(filename):
"""
Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human-readable
    string representing the error.
"""
ftype = IT_Invalid
err_msg = None
if not os.path.exists(filename):
err_msg = "'%s' does not exist" % filename
elif not os.path.isfile(filename):
err_msg = "'%s' does not name a file" % filename
elif is_executable_file(filename):
ftype = IT_Executable
elif is_json_file(filename):
ftype = IT_JSON
else:
err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
return ftype, err_msg
def check_input_file(filename):
"""
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
"""
ftype, msg = classify_input_file(filename)
if ftype == IT_Invalid:
print("Invalid input file: %s" % msg)
sys.exit(1)
return ftype
def find_benchmark_flag(prefix, benchmark_flags):
"""
Search the specified list of flags for a flag matching `<prefix><arg>` and
if it is found return the arg it specifies. If specified more than once the
last value is returned. If the flag is not found None is returned.
"""
assert prefix.startswith('--') and prefix.endswith('=')
result = None
for f in benchmark_flags:
if f.startswith(prefix):
result = f[len(prefix):]
return result
def remove_benchmark_flags(prefix, benchmark_flags):
"""
Return a new list containing the specified benchmark_flags except those
with the specified prefix.
"""
assert prefix.startswith('--') and prefix.endswith('=')
return [f for f in benchmark_flags if not f.startswith(prefix)]
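# Illustrative sketch of the two flag helpers above (hypothetical flag values):
def _example_flag_helpers():
    flags = ['--benchmark_filter=BM_Foo',
             '--benchmark_out=a.json',
             '--benchmark_out=b.json']
    last_out = find_benchmark_flag('--benchmark_out=', flags)      # 'b.json'
    remaining = remove_benchmark_flags('--benchmark_out=', flags)
    return last_out, remaining  # remaining == ['--benchmark_filter=BM_Foo']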
def load_benchmark_results(fname):
"""
Read benchmark output from a file and return the JSON object.
REQUIRES: 'fname' names a file containing JSON benchmark output.
"""
with open(fname, 'r') as f:
return json.load(f)
def run_benchmark(exe_name, benchmark_flags):
"""
Run a benchmark specified by 'exe_name' with the specified
'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
real time console output.
RETURNS: A JSON object representing the benchmark output
"""
output_name = find_benchmark_flag('--benchmark_out=',
benchmark_flags)
is_temp_output = False
if output_name is None:
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + \
['--benchmark_out=%s' % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:
print('TEST FAILED...')
sys.exit(exitCode)
json_res = load_benchmark_results(output_name)
if is_temp_output:
os.unlink(output_name)
return json_res
def run_or_load_benchmark(filename, benchmark_flags):
"""
Get the results for a specified benchmark. If 'filename' specifies
an executable benchmark then the results are generated by running the
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
elif ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
else:
assert False # This branch is unreachable
| libcudacxx-main | .upstream-tests/utils/google-benchmark/tools/gbench/util.py |
"""Google Benchmark tooling"""
__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
| libcudacxx-main | .upstream-tests/utils/google-benchmark/tools/gbench/__init__.py |
"""report.py - Utilities for reporting statistics about benchmark results
"""
import unittest
import os
import re
import copy
from scipy.stats import mannwhitneyu
class BenchmarkColor(object):
def __init__(self, name, code):
self.name = name
self.code = code
def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.code))
def __format__(self, format):
return self.code
# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9  # Lowest reasonable number; more is better.
UTEST_COL_NAME = "_pvalue"
def color_format(use_color, fmt_str, *args, **kwargs):
"""
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
is False then all color codes in 'args' and 'kwargs' are replaced with
the empty string.
"""
assert use_color is True or use_color is False
if not use_color:
args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for arg in args]
kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()}
return fmt_str.format(*args, **kwargs)
def find_longest_name(benchmark_list):
"""
Return the length of the longest benchmark name in a given list of
benchmark JSON objects
"""
longest_name = 1
for bc in benchmark_list:
if len(bc['name']) > longest_name:
longest_name = len(bc['name'])
return longest_name
def calculate_change(old_val, new_val):
"""
Return a float representing the decimal change between old_val and new_val.
"""
if old_val == 0 and new_val == 0:
return 0.0
if old_val == 0:
return float(new_val - old_val) / (float(old_val + new_val) / 2)
return float(new_val - old_val) / abs(old_val)
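# Illustrative values (hypothetical timings) for calculate_change() above:
def _example_calculate_change():
    return [
        calculate_change(100.0, 110.0),  # +0.10: 10% slower
        calculate_change(100.0, 90.0),   # -0.10: 10% faster
        calculate_change(0.0, 10.0),     # +2.00: symmetric change when old == 0
    ]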
def filter_benchmark(json_orig, family, replacement=""):
"""
Apply a filter to the json, and only leave the 'family' of benchmarks.
"""
regex = re.compile(family)
filtered = {}
filtered['benchmarks'] = []
for be in json_orig['benchmarks']:
if not regex.search(be['name']):
continue
filteredbench = copy.deepcopy(be) # Do NOT modify the old name!
filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
filtered['benchmarks'].append(filteredbench)
return filtered
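# Illustrative sketch of how filter_benchmark() is used by compare.py's
# 'benchmarksfiltered' mode (the benchmark names are hypothetical):
def _example_filter_benchmark():
    run = {'benchmarks': [{'name': 'BM_memcpy/8'}, {'name': 'BM_copy/8'}]}
    # Keep only the BM_memcpy family and rename it to a neutral label so it
    # can be diffed against another family filtered the same way.
    return filter_benchmark(run, 'BM_memcpy', '[memcpy vs. copy]')
    # expected: {'benchmarks': [{'name': '[memcpy vs. copy]/8'}]}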
def get_unique_benchmark_names(json):
"""
While *keeping* the order, give all the unique 'names' used for benchmarks.
"""
seen = set()
uniqued = [x['name'] for x in json['benchmarks']
if x['name'] not in seen and
(seen.add(x['name']) or True)]
return uniqued
def intersect(list1, list2):
"""
Given two lists, get a new list consisting of the elements only contained
in *both of the input lists*, while preserving the ordering.
"""
return [x for x in list1 if x in list2]
def partition_benchmarks(json1, json2):
"""
While preserving the ordering, find benchmarks with the same names in
both of the inputs, and group them.
(i.e. partition/filter into groups with common name)
"""
json1_unique_names = get_unique_benchmark_names(json1)
json2_unique_names = get_unique_benchmark_names(json2)
names = intersect(json1_unique_names, json2_unique_names)
partitions = []
for name in names:
        # Pick the time unit from the first entry of the lhs benchmark.
        # (Use the builtin next() so this works on both Python 2 and 3.)
        time_unit = next(x['time_unit']
                         for x in json1['benchmarks'] if x['name'] == name)
# Filter by name and time unit.
lhs = [x for x in json1['benchmarks'] if x['name'] == name and
x['time_unit'] == time_unit]
rhs = [x for x in json2['benchmarks'] if x['name'] == name and
x['time_unit'] == time_unit]
partitions.append([lhs, rhs])
return partitions
def extract_field(partition, field_name):
# The count of elements may be different. We want *all* of them.
lhs = [x[field_name] for x in partition[0]]
rhs = [x[field_name] for x in partition[1]]
return [lhs, rhs]
def print_utest(partition, utest_alpha, first_col_width, use_color=True):
timings_time = extract_field(partition, 'real_time')
timings_cpu = extract_field(partition, 'cpu_time')
min_rep_cnt = min(len(timings_time[0]),
len(timings_time[1]),
len(timings_cpu[0]),
len(timings_cpu[1]))
    # Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
if min_rep_cnt < UTEST_MIN_REPETITIONS:
return []
def get_utest_color(pval):
return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
time_pvalue = mannwhitneyu(
timings_time[0], timings_time[1], alternative='two-sided').pvalue
cpu_pvalue = mannwhitneyu(
timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
dsc = "U Test, Repetitions: {} vs {}".format(
len(timings_cpu[0]), len(timings_cpu[1]))
dsc_color = BC_OKGREEN
if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
dsc_color = BC_WARNING
dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
UTEST_OPTIMAL_REPETITIONS)
special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
last_name = partition[0][0]['name']
return [color_format(use_color,
special_str,
BC_HEADER,
"{}{}".format(last_name, UTEST_COL_NAME),
first_col_width,
get_utest_color(time_pvalue), time_pvalue,
get_utest_color(cpu_pvalue), cpu_pvalue,
dsc_color, dsc,
endc=BC_ENDC)]
def generate_difference_report(
json1,
json2,
display_aggregates_only=False,
utest=False,
utest_alpha=0.05,
use_color=True):
"""
Calculate and report the difference between each test of two benchmarks
runs specified as 'json1' and 'json2'.
"""
assert utest is True or utest is False
first_col_width = find_longest_name(json1['benchmarks'])
def find_test(name):
for b in json2['benchmarks']:
if b['name'] == name:
return b
return None
first_col_width = max(
first_col_width,
len('Benchmark'))
first_col_width += len(UTEST_COL_NAME)
first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
partitions = partition_benchmarks(json1, json2)
for partition in partitions:
# Careful, we may have different repetition count.
for i in range(min(len(partition[0]), len(partition[1]))):
bn = partition[0][i]
other_bench = partition[1][i]
# *If* we were asked to only display aggregates,
# and if it is non-aggregate, then skip it.
if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
assert bn['run_type'] == other_bench['run_type']
if bn['run_type'] != 'aggregate':
continue
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
def get_color(res):
if res > 0.05:
return BC_FAIL
elif res > -0.07:
return BC_WHITE
else:
return BC_CYAN
tres = calculate_change(bn['real_time'], other_bench['real_time'])
cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
output_strs += [color_format(use_color,
fmt_str,
BC_HEADER,
bn['name'],
first_col_width,
get_color(tres),
tres,
get_color(cpures),
cpures,
bn['real_time'],
other_bench['real_time'],
bn['cpu_time'],
other_bench['cpu_time'],
endc=BC_ENDC)]
# After processing the whole partition, if requested, do the U test.
if utest:
output_strs += print_utest(partition,
utest_alpha=utest_alpha,
first_col_width=first_col_width,
use_color=use_color)
return output_strs
###############################################################################
# Unit tests
class TestGetUniqueBenchmarkNames(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput = os.path.join(testInputs, 'test3_run0.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
def test_basic(self):
expect_lines = [
'BM_One',
'BM_Two',
'short', # These two are not sorted
'medium', # These two are not sorted
]
json = self.load_results()
output_lines = get_unique_benchmark_names(json)
print("\n")
print("\n".join(output_lines))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
self.assertEqual(expect_lines[i], output_lines[i])
class TestReportDifference(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test1_run1.json')
testOutput2 = os.path.join(testInputs, 'test1_run2.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_basic(self):
expect_lines = [
['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
['BM_100xSlower', '+99.0000', '+99.0000',
'100', '10000', '100', '10000'],
['BM_100xFaster', '-0.9900', '-0.9900',
'10000', '100', '10000', '100'],
['BM_10PercentCPUToTime', '+0.1000',
'-0.1000', '100', '110', '100', '90'],
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(
json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
def load_result(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput = os.path.join(testInputs, 'test2_run.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
def test_basic(self):
expect_lines = [
['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
]
json = self.load_result()
json1 = filter_benchmark(json, "BM_Z.ro", ".")
json2 = filter_benchmark(json, "BM_O.e", ".")
output_lines_with_header = generate_difference_report(
json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceWithUTest(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test3_run0.json')
testOutput2 = os.path.join(testInputs, 'test3_run1.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_utest(self):
expect_lines = []
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
'0.6985',
'0.6985',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'2.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
'0.1489',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'3.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(
json1, json2, utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test3_run0.json')
testOutput2 = os.path.join(testInputs, 'test3_run1.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_utest(self):
expect_lines = []
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
'0.6985',
'0.6985',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'2.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
'0.1489',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'3.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(
json1, json2, display_aggregates_only=True,
utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
if __name__ == '__main__':
unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| libcudacxx-main | .upstream-tests/utils/google-benchmark/tools/gbench/report.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import platform
import os
import libcudacxx.util
class CXXCompiler(object):
CM_Default = 0
CM_PreProcess = 1
CM_Compile = 2
CM_Link = 3
def __init__(self, path, first_arg,
flags=None, compile_flags=None, link_flags=None,
warning_flags=None, verify_supported=None,
verify_flags=None, use_verify=False,
modules_flags=None, use_modules=False,
use_ccache=False, use_warnings=False, compile_env=None,
cxx_type=None, cxx_version=None):
self.source_lang = 'c++'
self.path = path
self.first_arg = first_arg or ''
self.flags = list(flags or [])
self.compile_flags = list(compile_flags or [])
self.link_flags = list(link_flags or [])
self.warning_flags = list(warning_flags or [])
self.verify_supported = verify_supported
self.use_verify = use_verify
self.verify_flags = list(verify_flags or [])
assert not use_verify or verify_supported
assert not use_verify or verify_flags is not None
self.modules_flags = list(modules_flags or [])
self.use_modules = use_modules
assert not use_modules or modules_flags is not None
self.use_ccache = use_ccache
self.use_warnings = use_warnings
if compile_env is not None:
self.compile_env = dict(compile_env)
else:
self.compile_env = None
self.type = cxx_type
self.version = cxx_version
if self.type is None or self.version is None:
self._initTypeAndVersion()
def isVerifySupported(self):
if self.verify_supported is None:
self.verify_supported = self.hasCompileFlag(['-Xclang',
'-verify-ignore-unexpected'])
if self.verify_supported:
self.verify_flags = [
'-Xclang', '-verify',
'-Xclang', '-verify-ignore-unexpected=note',
'-ferror-limit=1024'
]
return self.verify_supported
def useVerify(self, value=True):
self.use_verify = value
assert not self.use_verify or self.verify_flags is not None
def useModules(self, value=True):
self.use_modules = value
assert not self.use_modules or self.modules_flags is not None
def useCCache(self, value=True):
self.use_ccache = value
def useWarnings(self, value=True):
self.use_warnings = value
def _initTypeAndVersion(self):
# Get compiler type and version
try:
macros = self.dumpMacros()
compiler_type = None
major_ver = minor_ver = patchlevel = None
self.is_nvrtc = False
if '__NVCC__' in macros.keys():
compiler_type = 'nvcc'
major_ver = macros['__CUDACC_VER_MAJOR__']
minor_ver = macros['__CUDACC_VER_MINOR__']
patchlevel = macros['__CUDACC_VER_BUILD__']
if '__LIBCUDACXX_NVRTC_TEST__' in macros.keys():
self.is_nvrtc = True
elif '__NVCOMPILER' in macros.keys():
compiler_type = 'nvhpc'
# NVHPC, unfortunately, adds an extra space between the macro name
# and macro value in their macro dump mode.
major_ver = macros['__NVCOMPILER_MAJOR__'].strip()
minor_ver = macros['__NVCOMPILER_MINOR__'].strip()
patchlevel = macros['__NVCOMPILER_PATCHLEVEL__'].strip()
elif '__INTEL_COMPILER' in macros.keys():
compiler_type = 'icc'
major_ver = int(macros['__INTEL_COMPILER']) / 100
minor_ver = (int(macros['__INTEL_COMPILER']) % 100) / 10
patchlevel = int(macros['__INTEL_COMPILER']) % 10
elif '__clang__' in macros.keys():
compiler_type = 'clang'
# Treat Apple's LLVM fork differently.
if '__apple_build_version__' in macros.keys():
compiler_type = 'apple-clang'
major_ver = macros['__clang_major__']
minor_ver = macros['__clang_minor__']
patchlevel = macros['__clang_patchlevel__']
elif '__GNUC__' in macros.keys():
compiler_type = 'gcc'
major_ver = macros['__GNUC__']
minor_ver = macros['__GNUC_MINOR__']
patchlevel = macros['__GNUC_PATCHLEVEL__']
if '__cplusplus' in macros.keys():
cplusplus = macros['__cplusplus']
if cplusplus[-1] == 'L':
cplusplus = cplusplus[:-1]
cpp_standard = int(cplusplus)
if cpp_standard <= 199711:
default_dialect = "c++03"
elif cpp_standard <= 201103:
default_dialect = "c++11"
elif cpp_standard <= 201402:
default_dialect = "c++14"
elif cpp_standard <= 201703:
default_dialect = "c++17"
else:
default_dialect = "c++20"
else:
default_dialect = "c++03"
self.type = compiler_type
self.version = (major_ver, minor_ver, patchlevel)
self.default_dialect = default_dialect
        except Exception:
(self.type, self.version, self.default_dialect, self.is_nvrtc) = \
self.dumpVersion()
if self.type == 'nvcc':
# Treat C++ as CUDA when the compiler is NVCC.
self.source_lang = 'cu'
def _basicCmd(self, source_files, out, mode=CM_Default, flags=[],
input_is_cxx=False):
cmd = []
if self.use_ccache \
and not mode == self.CM_Link \
and not mode == self.CM_PreProcess:
cmd += ['ccache']
cmd += [self.path] + ([self.first_arg] if self.first_arg != '' else [])
if out is not None:
cmd += ['-o', out]
if input_is_cxx:
cmd += ['-x', self.source_lang]
if isinstance(source_files, list):
cmd += source_files
elif isinstance(source_files, str):
cmd += [source_files]
else:
raise TypeError('source_files must be a string or list')
if mode == self.CM_PreProcess:
cmd += ['-E']
elif mode == self.CM_Compile:
cmd += ['-c']
cmd += self.flags
if self.use_verify:
cmd += self.verify_flags
assert mode in [self.CM_Default, self.CM_Compile]
if self.use_modules:
cmd += self.modules_flags
if mode != self.CM_Link:
cmd += self.compile_flags
if self.use_warnings:
cmd += self.warning_flags
if mode != self.CM_PreProcess and mode != self.CM_Compile:
cmd += self.link_flags
cmd += flags
return cmd
def preprocessCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_PreProcess,
input_is_cxx=True)
def compileCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_Compile,
input_is_cxx=True) + ['-c']
def linkCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_Link)
def compileLinkCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags)
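    # Illustrative note (not part of the original file): with a hypothetical
    # CXXCompiler('clang++', None), a call like compileCmd('test.cpp', 'test.o')
    # builds roughly
    #     ['clang++', '-o', 'test.o', '-x', 'c++', 'test.cpp', '-c',
    #      <flags>, <compile_flags>, '-c']
    # where the trailing '-c' appended by compileCmd duplicates the one that
    # _basicCmd already adds in CM_Compile mode; the duplicate is harmless.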
def preprocess(self, source_files, out=None, flags=[], cwd=None):
cmd = self.preprocessCmd(source_files, out, flags)
out, err, rc = libcudacxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def compile(self, source_files, out=None, flags=[], cwd=None):
cmd = self.compileCmd(source_files, out, flags)
out, err, rc = libcudacxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def link(self, source_files, out=None, flags=[], cwd=None):
cmd = self.linkCmd(source_files, out, flags)
out, err, rc = libcudacxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def compileLink(self, source_files, out=None, flags=[],
cwd=None):
cmd = self.compileLinkCmd(source_files, out, flags)
out, err, rc = libcudacxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def compileLinkTwoSteps(self, source_file, out=None, object_file=None,
flags=[], cwd=None):
if not isinstance(source_file, str):
raise TypeError('This function only accepts a single input file')
if object_file is None:
# Create, use and delete a temporary object file if none is given.
with_fn = lambda: libcudacxx.util.guardedTempFilename(suffix='.o')
else:
# Otherwise wrap the filename in a context manager function.
with_fn = lambda: libcudacxx.util.nullContext(object_file)
with with_fn() as object_file:
cc_cmd, cc_stdout, cc_stderr, rc = self.compile(
source_file, object_file, flags=flags, cwd=cwd)
if rc != 0:
return cc_cmd, cc_stdout, cc_stderr, rc
link_cmd, link_stdout, link_stderr, rc = self.link(
object_file, out=out, flags=flags, cwd=cwd)
return (cc_cmd + ['&&'] + link_cmd, cc_stdout + link_stdout,
cc_stderr + link_stderr, rc)
def dumpVersion(self, flags=[], cwd=None):
dumpversion_cpp = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "dumpversion.cpp")
with_fn = lambda: libcudacxx.util.guardedTempFilename(suffix=".exe")
with with_fn() as exe:
cmd, out, err, rc = self.compileLink([dumpversion_cpp], out=exe,
flags=flags, cwd=cwd)
if rc != 0:
return ("unknown", (0, 0, 0), "c++03", False)
out, err, rc = libcudacxx.util.executeCommand(exe, env=self.compile_env,
cwd=cwd)
version = None
try:
version = eval(out)
        except Exception:
pass
if not (isinstance(version, tuple) and 4 == len(version)):
version = ("unknown", (0, 0, 0), "c++03", False)
return version
def dumpMacros(self, source_files=None, flags=[], cwd=None):
if source_files is None:
source_files = os.devnull
flags = ['-dM'] + flags
cmd, out, err, rc = self.preprocess(source_files, flags=flags, cwd=cwd)
if rc != 0:
flags = ['-Xcompiler'] + flags
cmd, out, err, rc = self.preprocess(source_files, flags=flags, cwd=cwd)
if rc != 0:
return cmd, out, err, rc
parsed_macros = {}
lines = [l.strip() for l in out.split('\n') if l.strip()]
for l in lines:
# NVHPC also outputs the file contents from -E -dM for some reason; handle that
if not l.startswith('#define '):
if '__NVCOMPILER' not in parsed_macros.keys():
assert False, "a line not starting with '#define' encountered in predefined macro dump"
else:
continue
l = l[len('#define '):]
macro, _, value = l.partition(' ')
parsed_macros[macro] = value
return parsed_macros
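    # Illustrative note (not part of the original file): dumpMacros() parses
    # '-dM' preprocessor output, so a line such as
    #     #define __GNUC__ 11
    # becomes the dictionary entry {'__GNUC__': '11'}; a macro defined with
    # no value maps to the empty string because of the str.partition(' ') call.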
def getTriple(self):
if self.type == "msvc":
return "x86_64-pc-windows-msvc"
cmd = [self.path] + self.flags + ['-dumpmachine']
return libcudacxx.util.capture(cmd).strip()
def hasCompileFlag(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
        # Add -Werror so that an unrecognized flag causes a non-zero exit
        # code, and -fsyntax-only to skip code generation. These flags are
        # supported by all known compiler types other than nvcc and msvc.
if self.type is not None and self.type != 'nvcc' and self.type != 'msvc':
flags += ['-Werror', '-fsyntax-only']
empty_cpp = os.path.join(os.path.dirname(os.path.abspath(__file__)), "empty.cpp")
cmd, out, err, rc = self.compile(empty_cpp, out=os.devnull,
flags=flags)
if out.find('flag is not supported with the configured host compiler') != -1:
return False
if err.find('flag is not supported with the configured host compiler') != -1:
return False
return rc == 0
def addFlagIfSupported(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
if self.hasCompileFlag(flags):
self.flags += flags
return True
else:
return False
def addCompileFlagIfSupported(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
if self.hasCompileFlag(flags):
self.compile_flags += flags
return True
else:
return False
def hasWarningFlag(self, flag):
"""
hasWarningFlag - Test if the compiler supports a given warning flag.
Unlike addCompileFlagIfSupported, this function detects when
"-Wno-<warning>" flags are unsupported. If flag is a
"-Wno-<warning>" GCC will not emit an unknown option diagnostic unless
another error is triggered during compilation.
"""
assert isinstance(flag, str)
assert flag.startswith('-W')
if not flag.startswith('-Wno-'):
return self.hasCompileFlag(flag)
flags = ['-Werror', flag]
old_use_warnings = self.use_warnings
self.useWarnings(False)
cmd = self.compileCmd('-', os.devnull, flags)
self.useWarnings(old_use_warnings)
# Remove '-v' because it will cause the command line invocation
# to be printed as part of the error output.
# TODO(EricWF): Are there other flags we need to worry about?
if '-v' in cmd:
cmd.remove('-v')
out, err, rc = libcudacxx.util.executeCommand(
cmd, input=libcudacxx.util.to_bytes('#error\n'))
assert rc != 0
if flag in err:
return False
return True
def addWarningFlagIfSupported(self, flag):
if self.hasWarningFlag(flag):
if flag not in self.warning_flags:
self.warning_flags += [flag]
return True
return False
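# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal, hypothetical example
# of driving CXXCompiler by hand. It assumes a 'clang++' binary on PATH and
# that the module is run as `python -m libcudacxx.compiler` so that the
# `import libcudacxx.util` above resolves; the lit configuration normally
# constructs and configures this object instead.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    cxx = CXXCompiler('clang++', None, compile_flags=['-std=c++14'])
    print('type: %s version: %s' % (cxx.type, str(cxx.version)))
    macros = cxx.dumpMacros()
    print('__cplusplus: %s' % macros.get('__cplusplus'))
    if cxx.hasCompileFlag('-Wall'):
        cxx.useWarnings(True)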
| libcudacxx-main | .upstream-tests/utils/libcudacxx/compiler.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from contextlib import contextmanager
import errno
import os
import platform
import signal
import subprocess
import sys
import tempfile
import threading
# FIXME: Most of these functions are cribbed from LIT
def to_bytes(str):
# Encode to UTF-8 to get binary data.
if isinstance(str, bytes):
return str
return str.encode('utf-8')
def to_string(bytes):
if isinstance(bytes, str):
return bytes
return to_bytes(bytes)
def convert_string(bytes):
try:
return to_string(bytes.decode('utf-8'))
except AttributeError: # 'str' object has no attribute 'decode'.
return str(bytes)
except UnicodeError:
return str(bytes)
def cleanFile(filename):
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def guardedTempFilename(suffix='', prefix='', dir=None):
    # Creates and yields a temporary filename within a with statement. The file
# is removed upon scope exit.
handle, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
os.close(handle)
yield name
cleanFile(name)
@contextmanager
def guardedFilename(name):
    # yields a filename within a with statement. The file is removed upon scope
# exit.
yield name
cleanFile(name)
@contextmanager
def nullContext(value):
    # yields a variable within a with statement. No action is taken upon scope
# exit.
yield value
def makeReport(cmd, out, err, rc):
report = "Command: %s\n" % cmd
report += "Exit Code: %d\n" % rc
if out:
report += "Standard Output:\n--\n%s--\n" % out
if err:
report += "Standard Error:\n--\n%s--\n" % err
report += '\n'
return report
def capture(args, env=None):
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output. Raises a CalledProcessError if the command
exits with a non-zero status."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
out = convert_string(out)
err = convert_string(err)
if p.returncode != 0:
raise subprocess.CalledProcessError(cmd=args,
returncode=p.returncode,
output="{}\n{}".format(out, err))
return out
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.isfile(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p) and not os.path.isdir(p):
return p
return None
def checkToolsPath(dir, tools):
for tool in tools:
if not os.path.exists(os.path.join(dir, tool)):
return False
return True
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError:
e = sys.exc_info()[1]
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
class ExecuteCommandTimeoutException(Exception):
def __init__(self, msg, out, err, exitCode):
assert isinstance(msg, str)
assert isinstance(out, str)
assert isinstance(err, str)
assert isinstance(exitCode, int)
self.msg = msg
self.out = out
self.err = err
self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
"""
Execute command ``command`` (list of arguments or string)
with
* working directory ``cwd`` (str), use None to use the current
working directory
* environment ``env`` (dict), use None for none
* Input to the command ``input`` (str), use string to pass
no input.
* Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
Returns a tuple (out, err, exitCode) where
* ``out`` (str) is the standard output of running the command
* ``err`` (str) is the standard error of running the command
* ``exitCode`` (int) is the exitCode of running the command
If the timeout is hit an ``ExecuteCommandTimeoutException``
is raised.
"""
if input is not None:
input = to_bytes(input)
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env, close_fds=kUseCloseFDs)
timerObject = None
# FIXME: Because of the way nested function scopes work in Python 2.x we
# need to use a reference to a mutable object rather than a plain
# bool. In Python 3 we could use the "nonlocal" keyword but we need
# to support Python 2 as well.
hitTimeOut = [False]
try:
if timeout > 0:
def killProcess():
# We may be invoking a shell so we need to kill the
# process and all its children.
hitTimeOut[0] = True
killProcessAndChildren(p.pid)
timerObject = threading.Timer(timeout, killProcess)
timerObject.start()
out,err = p.communicate(input=input)
exitCode = p.wait()
finally:
        if timerObject is not None:
timerObject.cancel()
# Ensure the resulting output is always of string type.
out = convert_string(out)
err = convert_string(err)
if hitTimeOut[0]:
raise ExecuteCommandTimeoutException(
msg='Reached timeout of {} seconds'.format(timeout),
out=out,
err=err,
exitCode=exitCode
)
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
def killProcessAndChildren(pid):
"""
This function kills a process with ``pid`` and all its
running children (recursively). It is currently implemented
using the psutil module which provides a simple platform
neutral implementation.
TODO: Reimplement this without using psutil so we can
remove our dependency on it.
"""
if platform.system() == 'AIX':
subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid), shell=True)
else:
import psutil
try:
psutilProc = psutil.Process(pid)
# Handle the different psutil API versions
try:
# psutil >= 2.x
children_iterator = psutilProc.children(recursive=True)
except AttributeError:
# psutil 1.x
children_iterator = psutilProc.get_children(recursive=True)
for child in children_iterator:
try:
child.kill()
except psutil.NoSuchProcess:
pass
psutilProc.kill()
except psutil.NoSuchProcess:
pass
def executeCommandVerbose(cmd, *args, **kwargs):
"""
Execute a command and print its output on failure.
"""
out, err, exitCode = executeCommand(cmd, *args, **kwargs)
if exitCode != 0:
report = makeReport(cmd, out, err, exitCode)
report += "\n\nFailed!"
sys.stderr.write('%s\n' % report)
return out, err, exitCode
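# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal example of the
# helpers above, assuming a POSIX-like environment with 'echo' on PATH. The
# test format normally reaches these through CXXCompiler and the executors.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    out, err, rc = executeCommand(['echo', 'hello'])
    sys.stdout.write(makeReport(['echo', 'hello'], out, err, rc))
    with guardedTempFilename(suffix='.txt') as name:
        # 'name' exists for the duration of the with block and is removed after.
        with open(name, 'w') as f:
            f.write(out)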
| libcudacxx-main | .upstream-tests/utils/libcudacxx/util.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""libcxx python utilities"""
__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 1, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
| libcudacxx-main | .upstream-tests/utils/libcudacxx/__init__.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import inspect
def trace_function(function, log_calls, log_results, label=''):
def wrapper(*args, **kwargs):
        kwarg_strs = ['{}={}'.format(k, v) for (k, v) in kwargs.items()]
        arg_str = ', '.join([str(a) for a in args] + kwarg_strs)
        call_str = '{}({})'.format(function.__name__, arg_str)
# Perform the call itself, logging before, after, and anything thrown.
try:
if log_calls:
print('{}: Calling {}'.format(label, call_str))
res = function(*args, **kwargs)
if log_results:
print('{}: {} -> {}'.format(label, call_str, res))
return res
except Exception as ex:
if log_results:
print('{}: {} raised {}'.format(label, call_str, type(ex)))
raise ex
return wrapper
def trace_object(obj, log_calls, log_results, label=''):
for name, member in inspect.getmembers(obj):
if inspect.ismethod(member):
# Skip meta-functions, decorate everything else
            if not member.__name__.startswith('__'):
setattr(obj, name, trace_function(member, log_calls,
log_results, label))
return obj
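# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal, hypothetical
# example of wrapping an object so that its method calls and results are
# printed. The test configuration uses this to trace executor objects when
# debugging; 'Greeter' below is purely illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class Greeter(object):
        def greet(self, name):
            return 'hello ' + name
    g = trace_object(Greeter(), log_calls=True, log_results=True, label='demo')
    g.greet('world')
    # Prints roughly:
    #   demo: Calling greet(world)
    #   demo: greet(world) -> hello world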
| libcudacxx-main | .upstream-tests/utils/libcudacxx/test/tracing.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import locale
import os
import platform
import pkgutil
import pipes
import re
import shlex
import shutil
import sys
from libcudacxx.compiler import CXXCompiler
from libcudacxx.test.target_info import make_target_info
from libcudacxx.test.executor import *
from libcudacxx.test.tracing import *
import libcudacxx.util
def loadSiteConfig(lit_config, config, param_name, env_name):
# We haven't loaded the site specific configuration (the user is
# probably trying to run on a test file directly, and either the site
# configuration hasn't been created by the build system, or we are in an
# out-of-tree build situation).
site_cfg = lit_config.params.get(param_name,
os.environ.get(env_name))
if not site_cfg:
lit_config.warning('No site specific configuration file found!'
' Running the tests in the default configuration.')
elif not os.path.isfile(site_cfg):
lit_config.fatal(
"Specified site configuration file does not exist: '%s'" %
site_cfg)
else:
lit_config.note('using site specific configuration at %s' % site_cfg)
ld_fn = lit_config.load_config
# Null out the load_config function so that lit.site.cfg doesn't
# recursively load a config even if it tries.
# TODO: This is one hell of a hack. Fix it.
def prevent_reload_fn(*args, **kwargs):
pass
lit_config.load_config = prevent_reload_fn
ld_fn(config, site_cfg)
lit_config.load_config = ld_fn
# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
return int(token.rstrip('LlUu'))
class Configuration(object):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
self.is_windows = platform.system() == 'Windows'
self.cxx = None
self.cxx_is_clang_cl = None
self.cxx_stdlib_under_test = None
self.project_obj_root = None
self.libcudacxx_src_root = None
self.libcudacxx_obj_root = None
self.cxx_library_root = None
self.cxx_runtime_root = None
self.abi_library_root = None
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict(os.environ)
self.use_target = False
self.use_system_cxx_lib = False
self.use_clang_verify = False
self.long_tests = None
self.execute_external = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
def get_lit_bool(self, name, default=None, env_var=None):
def check_value(value, var_name):
if value is None:
return default
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise TypeError('expected bool or string')
if value.lower() in ('1', 'true'):
return True
if value.lower() in ('', '0', 'false'):
return False
self.lit_config.fatal(
"parameter '{}' should be true or false".format(var_name))
conf_val = self.get_lit_conf(name)
if env_var is not None and env_var in os.environ and \
os.environ[env_var] is not None:
val = os.environ[env_var]
if conf_val is not None:
self.lit_config.warning(
'Environment variable %s=%s is overriding explicit '
'--param=%s=%s' % (env_var, val, name, conf_val))
return check_value(val, env_var)
return check_value(conf_val, name)
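    # Illustrative note (not part of the original file): with the helpers
    # above, a boolean option such as 'long_tests' can come from
    # '--param=long_tests=true' on the lit command line, from an attribute in
    # lit.site.cfg, or from an environment variable when env_var is supplied;
    # '1'/'true' parse as True and ''/'0'/'false' parse as False.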
def get_modules_enabled(self):
return self.get_lit_bool('enable_modules',
default=False,
env_var='LIBCUDACXX_ENABLE_MODULES')
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
if self.is_windows:
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
def configure(self):
self.configure_executor()
self.configure_use_system_cxx_lib()
self.configure_target_info()
self.configure_cxx()
self.configure_triple()
self.configure_deployment()
self.configure_src_root()
self.configure_obj_root()
self.configure_cxx_stdlib_under_test()
self.configure_cxx_library_root()
self.configure_use_clang_verify()
self.configure_use_thread_safety()
self.configure_no_execute()
self.configure_execute_external()
self.configure_ccache()
self.configure_compile_flags()
self.configure_filesystem_compile_flags()
self.configure_link_flags()
self.configure_env()
self.configure_color_diagnostics()
self.configure_debug_mode()
self.configure_warnings()
self.configure_sanitizer()
self.configure_coverage()
self.configure_modules()
self.configure_coroutines()
self.configure_substitutions()
self.configure_features()
def print_config_info(self):
# Print the final compile and link flags.
self.lit_config.note('Using compiler: %s %s' % (self.cxx.path, self.cxx.first_arg))
self.lit_config.note('Using flags: %s' % self.cxx.flags)
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
self.lit_config.note('Using compile flags: %s'
% self.cxx.compile_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
self.lit_config.note('Using link flags: %s' % self.cxx.link_flags)
# Print as list to prevent "set([...])" from being printed.
self.lit_config.note('Using available_features: %s' %
list(self.config.available_features))
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
sys.stderr.flush() # Force flushing to avoid broken output on Windows
def get_test_format(self):
from libcudacxx.test.format import LibcxxTestFormat
return LibcxxTestFormat(
self.cxx,
self.use_clang_verify,
self.execute_external,
self.executor,
exec_env=self.exec_env)
def configure_executor(self):
exec_str = self.get_lit_conf('executor', "None")
exec_timeout = self.get_lit_conf('maxIndividualTestTime', "None")
te = eval(exec_str)
if te:
self.lit_config.note("Using executor: %r" % exec_str)
if self.lit_config.useValgrind:
# We have no way of knowing where in the chain the
# ValgrindExecutor is supposed to go. It is likely
# that the user wants it at the end, but we have no
# way of getting at that easily.
                self.lit_config.fatal("Cannot infer how to create a Valgrind "
" executor.")
else:
te = LocalExecutor()
te.timeout = 0
        if exec_timeout and exec_timeout != "None":
            te.timeout = exec_timeout
if self.lit_config.useValgrind:
te = ValgrindExecutor(self.lit_config.valgrindArgs, te)
self.executor = te
def configure_target_info(self):
self.target_info = make_target_info(self)
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
cxx_first_arg = self.get_lit_conf('cxx_first_arg')
self.cxx_is_clang_cl = cxx is not None and \
os.path.basename(cxx) == 'clang-cl.exe'
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
search_paths = self.config.environment['PATH']
if cxx is not None and os.path.isabs(cxx):
search_paths = os.path.dirname(cxx)
clangxx = libcudacxx.util.which('clang++', search_paths)
if clangxx:
cxx = clangxx
self.lit_config.note(
"inferred cxx_under_test as: %r" % cxx)
elif self.cxx_is_clang_cl:
self.lit_config.fatal('Failed to find clang++ substitution for'
' clang-cl')
if not cxx:
self.lit_config.fatal('must specify user parameter cxx_under_test '
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(cxx, cxx_first_arg) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
cxx_type = self.cxx.type
if cxx_type is not None:
assert self.cxx.version is not None
maj_v, min_v, patch_v = self.cxx.version
self.config.available_features.add(cxx_type)
self.config.available_features.add('%s-%s' % (cxx_type, maj_v))
self.config.available_features.add('%s-%s.%s' % (
cxx_type, maj_v, min_v))
self.config.available_features.add('%s-%s.%s.%s' % (
cxx_type, maj_v, min_v, patch_v))
self.lit_config.note("detected cxx.type as: {}".format(
self.cxx.type))
self.lit_config.note("detected cxx.version as: {}".format(
self.cxx.version))
self.lit_config.note("detected cxx.default_dialect as: {}".format(
self.cxx.default_dialect))
self.lit_config.note("detected cxx.is_nvrtc as: {}".format(
self.cxx.is_nvrtc))
self.cxx.compile_env = dict(os.environ)
# 'CCACHE_CPP2' prevents ccache from stripping comments while
# preprocessing. This is required to prevent stripping of '-verify'
# comments.
self.cxx.compile_env['CCACHE_CPP2'] = '1'
if self.cxx.type == 'nvcc':
nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
if len(nvcc_host_compiler.strip()) == 0:
if platform.system() == 'Darwin':
nvcc_host_compiler = 'clang'
elif platform.system() == 'Windows':
nvcc_host_compiler = 'cl.exe'
else:
nvcc_host_compiler = 'gcc'
self.host_cxx = CXXCompiler(nvcc_host_compiler, None)
self.host_cxx_type = self.host_cxx.type
if self.host_cxx_type is not None:
assert self.host_cxx.version is not None
maj_v, min_v, _ = self.host_cxx.version
self.config.available_features.add(self.host_cxx_type)
self.config.available_features.add('%s-%s' % (
self.host_cxx_type, maj_v))
self.config.available_features.add('%s-%s.%s' % (
self.host_cxx_type, maj_v, min_v))
self.lit_config.note("detected host_cxx.type as: {}".format(
self.host_cxx.type))
self.lit_config.note("detected host_cxx.version as: {}".format(
self.host_cxx.version))
self.lit_config.note("detected host_cxx.default_dialect as: {}".format(
self.host_cxx.default_dialect))
self.lit_config.note("detected host_cxx.is_nvrtc as: {}".format(
self.host_cxx.is_nvrtc))
if 'icc' in self.config.available_features:
self.cxx.link_flags += ['-lirc']
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = _prefixed_env_list('INCLUDE', '-isystem')
link_flags = _prefixed_env_list('LIB', '-L')
for path in _split_env_var('LIB'):
self.add_path(self.exec_env, path)
return CXXCompiler(clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def _dump_macros_verbose(self, *args, **kwargs):
macros_or_error = self.cxx.dumpMacros(*args, **kwargs)
if isinstance(macros_or_error, tuple):
cmd, out, err, rc = macros_or_error
report = libcudacxx.util.makeReport(cmd, out, err, rc)
report += "Compiler failed unexpectedly when dumping macros!"
self.lit_config.fatal(report)
return None
assert isinstance(macros_or_error, dict)
return macros_or_error
def configure_src_root(self):
self.libcudacxx_src_root = self.get_lit_conf(
'libcudacxx_src_root', os.path.dirname(self.config.test_source_root))
def configure_obj_root(self):
self.project_obj_root = self.get_lit_conf('project_obj_root')
self.libcudacxx_obj_root = self.get_lit_conf('libcudacxx_obj_root')
if not self.libcudacxx_obj_root and self.project_obj_root is not None:
possible_roots = [
os.path.join(self.project_obj_root, 'libcudacxx'),
os.path.join(self.project_obj_root, 'projects', 'libcudacxx'),
os.path.join(self.project_obj_root, 'runtimes', 'libcudacxx'),
]
for possible_root in possible_roots:
if os.path.isdir(possible_root):
self.libcudacxx_obj_root = possible_root
break
else:
self.libcudacxx_obj_root = self.project_obj_root
def configure_cxx_library_root(self):
self.cxx_library_root = self.get_lit_conf('cxx_library_root',
self.libcudacxx_obj_root)
self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root',
self.cxx_library_root)
def configure_use_system_cxx_lib(self):
# This test suite supports testing against either the system library or
# the locally built one; the former mode is useful for testing ABI
# compatibility between the current headers and a shipping dynamic
# library.
# Default to testing against the locally built libc++ library.
self.use_system_cxx_lib = self.get_lit_conf('use_system_cxx_lib')
if self.use_system_cxx_lib == 'true':
self.use_system_cxx_lib = True
elif self.use_system_cxx_lib == 'false':
self.use_system_cxx_lib = False
elif self.use_system_cxx_lib:
assert os.path.isdir(self.use_system_cxx_lib), "the specified use_system_cxx_lib parameter (%s) is not a valid directory" % self.use_system_cxx_lib
self.use_system_cxx_lib = os.path.abspath(self.use_system_cxx_lib)
self.lit_config.note(
"inferred use_system_cxx_lib as: %r" % self.use_system_cxx_lib)
def configure_cxx_stdlib_under_test(self):
self.cxx_stdlib_under_test = self.get_lit_conf(
'cxx_stdlib_under_test', 'libc++')
if self.cxx_stdlib_under_test not in \
['libc++', 'libstdc++', 'msvc', 'cxx_default']:
self.lit_config.fatal(
'unsupported value for "cxx_stdlib_under_test": %s'
% self.cxx_stdlib_under_test)
self.config.available_features.add(self.cxx_stdlib_under_test)
if self.cxx_stdlib_under_test == 'libstdc++':
self.config.available_features.add('libstdc++')
# Manually enable the experimental and filesystem tests for libstdc++
# if the options aren't present.
# FIXME this is a hack.
if self.get_lit_conf('enable_experimental') is None:
self.config.enable_experimental = 'true'
def configure_use_clang_verify(self):
'''If set, run clang with -verify on failing tests.'''
self.use_clang_verify = self.get_lit_bool('use_clang_verify')
if self.use_clang_verify is None:
# NOTE: We do not test for the -verify flag directly because
# -verify will always exit with non-zero on an empty file.
self.use_clang_verify = self.cxx.isVerifySupported()
self.lit_config.note(
"inferred use_clang_verify as: %r" % self.use_clang_verify)
if self.use_clang_verify:
self.config.available_features.add('verify-support')
def configure_use_thread_safety(self):
        '''Enable clang thread-safety analysis warnings if supported.'''
has_thread_safety = self.cxx.hasCompileFlag('-Werror=thread-safety')
if has_thread_safety:
self.cxx.compile_flags += ['-Werror=thread-safety']
self.config.available_features.add('thread-safety')
self.lit_config.note("enabling thread-safety annotations")
def configure_execute_external(self):
# Choose between lit's internal shell pipeline runner and a real shell.
# If LIT_USE_INTERNAL_SHELL is in the environment, we use that as the
# default value. Otherwise we ask the target_info.
use_lit_shell_default = os.environ.get('LIT_USE_INTERNAL_SHELL')
if use_lit_shell_default is not None:
use_lit_shell_default = use_lit_shell_default != '0'
else:
use_lit_shell_default = self.target_info.use_lit_shell_default()
# Check for the command line parameter using the default value if it is
# not present.
use_lit_shell = self.get_lit_bool('use_lit_shell',
use_lit_shell_default)
self.execute_external = not use_lit_shell
def configure_no_execute(self):
if type(self.executor) == NoopExecutor:
self.config.available_features.add('no_execute')
def configure_ccache(self):
use_ccache_default = os.environ.get('LIBCUDACXX_USE_CCACHE') is not None
use_ccache = self.get_lit_bool('use_ccache', use_ccache_default)
if use_ccache:
self.cxx.use_ccache = True
self.lit_config.note('enabling ccache')
def add_deployment_feature(self, feature):
(arch, name, version) = self.config.deployment
self.config.available_features.add('%s=%s-%s' % (feature, arch, name))
self.config.available_features.add('%s=%s' % (feature, name))
self.config.available_features.add('%s=%s%s' % (feature, name, version))
def configure_features(self):
additional_features = self.get_lit_conf('additional_features')
if additional_features:
for f in additional_features.split(','):
self.config.available_features.add(f.strip())
self.target_info.add_locale_features(self.config.available_features)
target_platform = self.target_info.platform()
# Write an "available feature" that combines the triple when
# use_system_cxx_lib is enabled. This is so that we can easily write
# XFAIL markers for tests that are known to fail with versions of
# libc++ as were shipped with a particular triple.
if self.use_system_cxx_lib:
self.config.available_features.add('with_system_cxx_lib')
self.config.available_features.add(
'with_system_cxx_lib=%s' % self.config.target_triple)
# Add subcomponents individually.
target_components = self.config.target_triple.split('-')
for component in target_components:
self.config.available_features.add(
'with_system_cxx_lib=%s' % component)
# Add available features for more generic versions of the target
# triple attached to with_system_cxx_lib.
if self.use_deployment:
self.add_deployment_feature('with_system_cxx_lib')
# Configure the availability feature. Availability is only enabled
# with libc++, because other standard libraries do not provide
# availability markup.
if self.use_deployment and self.cxx_stdlib_under_test == 'libc++':
self.config.available_features.add('availability')
self.add_deployment_feature('availability')
if platform.system() == 'Darwin':
self.config.available_features.add('apple-darwin')
# Insert the platform name into the available features as a lower case.
self.config.available_features.add(target_platform)
# Simulator testing can take a really long time for some of these tests
# so add a feature check so we can REQUIRES: long_tests in them
self.long_tests = self.get_lit_bool('long_tests')
if self.long_tests is None:
# Default to running long tests.
self.long_tests = True
self.lit_config.note(
"inferred long_tests as: %r" % self.long_tests)
if self.long_tests:
self.config.available_features.add('long_tests')
if not self.get_lit_bool('enable_filesystem', default=True):
self.config.available_features.add('c++filesystem-disabled')
self.config.available_features.add('dylib-has-no-filesystem')
# Run a compile test for the -fsized-deallocation flag. This is needed
# in test/std/language.support/support.dynamic/new.delete
if self.cxx.hasCompileFlag('-fsized-deallocation'):
self.config.available_features.add('-fsized-deallocation')
if self.cxx.hasCompileFlag('-faligned-allocation'):
self.config.available_features.add('-faligned-allocation')
else:
# FIXME remove this once more than just clang-4.0 support
# C++17 aligned allocation.
self.config.available_features.add('no-aligned-allocation')
if self.cxx.hasCompileFlag('-fdelayed-template-parsing'):
self.config.available_features.add('fdelayed-template-parsing')
if self.get_lit_bool('has_libatomic', False):
self.config.available_features.add('libatomic')
if 'msvc' not in self.config.available_features:
macros = self._dump_macros_verbose()
if '__cpp_if_constexpr' not in macros:
self.config.available_features.add('libcpp-no-if-constexpr')
if '__cpp_structured_bindings' not in macros:
self.config.available_features.add('libcpp-no-structured-bindings')
if '__cpp_deduction_guides' not in macros or \
intMacroValue(macros['__cpp_deduction_guides']) < 201611:
self.config.available_features.add('libcpp-no-deduction-guides')
if self.is_windows:
self.config.available_features.add('windows')
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
# and fixed. This allows easier detection of new test failures
# and regressions. Note: New failures should not be suppressed
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCUDACXX-WINDOWS-FIXME')
if 'msvc' not in self.config.available_features:
# Attempt to detect the glibc version by querying for __GLIBC__
# in 'features.h'.
macros = self.cxx.dumpMacros(flags=['-include', 'features.h'])
if isinstance(macros, dict) and '__GLIBC__' in macros:
maj_v, min_v = (macros['__GLIBC__'], macros['__GLIBC_MINOR__'])
self.config.available_features.add('glibc')
self.config.available_features.add('glibc-%s' % maj_v)
self.config.available_features.add('glibc-%s.%s' % (maj_v, min_v))
libcudacxx_gdb = self.get_lit_conf('libcudacxx_gdb')
if libcudacxx_gdb and 'NOTFOUND' not in libcudacxx_gdb:
self.config.available_features.add('libcudacxx_gdb')
self.cxx.libcudacxx_gdb = libcudacxx_gdb
# Support Objective-C++ only on MacOS and if the compiler supports it.
if self.target_info.platform() == "darwin" and \
self.target_info.is_host_macosx() and \
self.cxx.hasCompileFlag(["-x", "objective-c++", "-fobjc-arc"]):
self.config.available_features.add("objective-c++")
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.is_windows:
# FIXME: Can we remove this?
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
# Required so that tests using min/max don't fail on Windows,
# and so that those tests don't have to be changed to tolerate
# this insanity.
self.cxx.compile_flags += ['-DNOMINMAX']
if 'msvc' in self.config.available_features:
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-Xcompiler']
self.cxx.compile_flags += ['/bigobj']
additional_flags = self.get_lit_conf('test_compiler_flags')
if additional_flags:
self.cxx.compile_flags += shlex.split(additional_flags)
compute_archs = self.get_lit_conf('compute_archs')
if self.cxx.is_nvrtc is True:
self.config.available_features.add("nvrtc")
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['--extended-lambda']
pre_sm_32 = True
pre_sm_60 = True
pre_sm_70 = True
pre_sm_80 = True
pre_sm_90 = True
if compute_archs and self.cxx.type == 'nvcc':
pre_sm_32 = False
pre_sm_60 = False
pre_sm_70 = False
pre_sm_80 = False
pre_sm_90 = False
            compute_archs = sorted(int(a) for a in shlex.split(compute_archs))
for arch in compute_archs:
if arch < 32: pre_sm_32 = True
if arch < 60: pre_sm_60 = True
if arch < 70: pre_sm_70 = True
if arch < 80: pre_sm_80 = True
if arch < 90: pre_sm_90 = True
arch_flag = '-gencode=arch=compute_{0},code=sm_{0}'.format(arch)
self.cxx.compile_flags += [arch_flag]
enable_compute_future = self.get_lit_conf('enable_compute_future')
if enable_compute_future:
arch_flag = '-gencode=arch=compute_{0},code=compute_{0}'.format(arch)
self.cxx.compile_flags += [arch_flag]
if pre_sm_32:
self.config.available_features.add("pre-sm-32")
if pre_sm_60:
self.config.available_features.add("pre-sm-60")
if pre_sm_70:
self.config.available_features.add("pre-sm-70")
if pre_sm_80:
self.config.available_features.add("pre-sm-80")
if pre_sm_90:
self.config.available_features.add("pre-sm-90")
def configure_default_compile_flags(self):
nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
if nvcc_host_compiler and self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-ccbin={0}'.format(nvcc_host_compiler)]
        # Try to get the std version from the command line or lit.site.cfg.
        # If it is not given, probe for the newest dialect supported by the
        # compiler (and, for NVCC, by the host compiler as well), falling
        # back to the compiler's default dialect.
std = self.get_lit_conf('std')
if not std:
# Choose the newest possible language dialect if none is given.
possible_stds = ['c++20', 'c++2a', 'c++17', 'c++1z', 'c++14', 'c++11',
'c++03']
if self.cxx.type == 'gcc':
maj_v, _, _ = self.cxx.version
maj_v = int(maj_v)
if maj_v < 6:
possible_stds.remove('c++1z')
possible_stds.remove('c++17')
# FIXME: How many C++14 tests actually fail under GCC 5 and 6?
# Should we XFAIL them individually instead?
if maj_v < 6:
possible_stds.remove('c++14')
for s in possible_stds:
cxx = self.cxx
success = True
if self.cxx.type == 'nvcc':
# NVCC warns, but doesn't error, if the host compiler
# doesn't support the dialect. It's also possible that the
# host compiler supports the dialect, but NVCC doesn't.
# So, first we need to check if NVCC supports the dialect...
if not self.cxx.hasCompileFlag('-std=%s' % s):
# If it doesn't, give up on this dialect.
success = False
# ... then we need to check if host compiler supports the
# dialect.
cxx = self.host_cxx
if cxx.type == 'msvc':
if not cxx.hasCompileFlag('/std:%s' % s):
success = False
else:
if not cxx.hasCompileFlag('-std=%s' % s):
success = False
if success:
std = s
self.lit_config.note('inferred language dialect as: %s' % std)
break
if std:
# We found a dialect flag.
if self.cxx.type == 'msvc':
self.cxx.compile_flags += ['/std:{0}'.format(std)]
else:
self.cxx.compile_flags += ['-std={0}'.format(std)]
if not std:
# There is no dialect flag. This happens with older MSVC.
if self.cxx.type == 'nvcc':
std = self.host_cxx.default_dialect
else:
std = self.cxx.default_dialect
self.lit_config.note('using default language dialect: %s' % std)
        std_feature = std.replace('gnu++', 'c++')
        std_feature = std_feature.replace('1z', '17')
        std_feature = std_feature.replace('2a', '20')
self.config.available_features.add(std_feature)
# Configure include paths
self.configure_compile_flags_header_includes()
self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
# Configure feature flags.
self.configure_compile_flags_exceptions()
self.configure_compile_flags_rtti()
self.configure_compile_flags_abi_version()
enable_32bit = self.get_lit_bool('enable_32bit', False)
if enable_32bit:
self.cxx.flags += ['-m32']
# Use verbose output for better errors
self.cxx.flags += ['-v']
sysroot = self.get_lit_conf('sysroot')
if sysroot:
self.cxx.flags += ['--sysroot=' + sysroot]
gcc_toolchain = self.get_lit_conf('gcc_toolchain')
if gcc_toolchain:
self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
        # NOTE: the _DEBUG definition must precede the triple check because for
# the Windows build of libc++, the forced inclusion of a header requires
# that _DEBUG is defined. Incorrect ordering will result in -target
# being elided.
if self.is_windows and self.debug_build:
self.cxx.compile_flags += ['-D_DEBUG']
if self.use_target:
if not self.cxx.addFlagIfSupported(
['--target=' + self.config.target_triple]):
self.lit_config.warning('use_target is true but --target is '\
'not supported by the compiler')
if self.use_deployment:
arch, name, version = self.config.deployment
self.cxx.flags += ['-arch', arch]
self.cxx.flags += ['-m' + name + '-version-min=' + version]
# Add includes for support headers used in the tests.
support_path = os.path.join(self.libcudacxx_src_root, 'test/support')
self.cxx.compile_flags += ['-I' + support_path]
# Add includes for the PSTL headers
pstl_src_root = self.get_lit_conf('pstl_src_root')
pstl_obj_root = self.get_lit_conf('pstl_obj_root')
if pstl_src_root is not None and pstl_obj_root is not None:
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
self.config.available_features.add('parallel-algorithms')
# FIXME(EricWF): variant_size.pass.cpp requires a slightly larger
# template depth with older Clang versions.
self.cxx.addFlagIfSupported('-ftemplate-depth=270')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcudacxx_src_root, 'test', 'support')
self.configure_config_site_header()
if self.cxx_stdlib_under_test != 'libstdc++' and \
not self.is_windows:
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
pass
if self.is_windows and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers == '' or (cxx_headers is None
and self.cxx_stdlib_under_test != 'libc++'):
self.lit_config.note('using the system cxx headers')
return
if self.cxx.type != 'nvcc' and self.cxx.type != 'nvhpc':
self.cxx.compile_flags += ['-nostdinc++']
if cxx_headers is None:
cxx_headers = os.path.join(self.libcudacxx_src_root, 'include')
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='%s' is not a directory."
% cxx_headers)
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcudacxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcudacxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
def configure_config_site_header(self):
# Check for a possible __config_site in the build directory. We
# use this if it exists.
if self.libcudacxx_obj_root is None:
return
config_site_header = os.path.join(self.libcudacxx_obj_root, '__config_site')
if not os.path.isfile(config_site_header):
return
contained_macros = self.parse_config_site_and_add_features(
config_site_header)
self.lit_config.note('Using __config_site header %s with macros: %r'
% (config_site_header, contained_macros))
# FIXME: This must come after the call to
# 'parse_config_site_and_add_features(...)' in order for it to work.
self.cxx.compile_flags += ['-include', config_site_header]
def parse_config_site_and_add_features(self, header):
""" parse_config_site_and_add_features - Deduce and add the test
        features that are implied by the #define's in the __config_site
header. Return a dictionary containing the macros found in the
'__config_site' header.
"""
# MSVC can't dump macros, so we just give up.
if 'msvc' in self.config.available_features:
return {}
# Parse the macro contents of __config_site by dumping the macros
# using 'c++ -dM -E' and filtering the predefines.
predefines = self._dump_macros_verbose()
macros = self._dump_macros_verbose(header)
feature_macros_keys = set(macros.keys()) - set(predefines.keys())
feature_macros = {}
for k in feature_macros_keys:
feature_macros[k] = macros[k]
# We expect the header guard to be one of the definitions
assert '_LIBCUDACXX_CONFIG_SITE' in feature_macros
del feature_macros['_LIBCUDACXX_CONFIG_SITE']
# The __config_site header should be non-empty. Otherwise it should
# have never been emitted by CMake.
assert len(feature_macros) > 0
# FIXME: This is a hack that should be fixed using module maps.
# If modules are enabled then we have to lift all of the definitions
# in __config_site onto the command line.
for m in feature_macros:
define = '-D%s' % m
if feature_macros[m]:
define += '=%s' % (feature_macros[m])
self.cxx.modules_flags += [define]
if self.cxx.hasCompileFlag('-Wno-macro-redefined'):
self.cxx.compile_flags += ['-Wno-macro-redefined']
# Transform each macro name into the feature name used in the tests.
# Ex. _LIBCUDACXX_HAS_NO_THREADS -> libcpp-has-no-threads
for m in feature_macros:
if m == '_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS' or \
m == '_LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT':
continue
if m == '_LIBCUDACXX_ABI_VERSION':
self.config.available_features.add('libcpp-abi-version-v%s'
% feature_macros[m])
continue
if m == '_LIBCUDACXX_NO_VCRUNTIME':
self.config.available_features.add('libcpp-no-vcruntime')
continue
assert m.startswith('_LIBCUDACXX_HAS_') or m.startswith('_LIBCUDACXX_ABI_')
m = m.lower()[1:].replace('_', '-')
self.config.available_features.add(m)
return feature_macros
def configure_compile_flags_exceptions(self):
enable_exceptions = self.get_lit_bool('enable_exceptions', True)
if not enable_exceptions:
self.config.available_features.add('libcpp-no-exceptions')
if 'nvhpc' in self.config.available_features:
# NVHPC reports all expressions as `noexcept(true)` with its
# "no exceptions" mode. Override the setting from CMake as
# a temporary workaround for that.
pass
# TODO: I don't know how to shut off exceptions with MSVC.
elif 'msvc' not in self.config.available_features:
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-Xcompiler']
self.cxx.compile_flags += ['-fno-exceptions']
def configure_compile_flags_rtti(self):
enable_rtti = self.get_lit_bool('enable_rtti', True)
if not enable_rtti:
self.config.available_features.add('libcpp-no-rtti')
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-Xcompiler']
if 'nvhpc' in self.config.available_features:
self.cxx.compile_flags += ['--no_rtti']
elif 'msvc' in self.config.available_features:
self.cxx.compile_flags += ['/GR-']
self.cxx.compile_flags += ['-D_SILENCE_CXX20_CISO646_REMOVED_WARNING']
else:
self.cxx.compile_flags += ['-fno-rtti']
self.cxx.compile_flags += ['-D_LIBCUDACXX_NO_RTTI']
def configure_compile_flags_abi_version(self):
abi_version = self.get_lit_conf('abi_version', '').strip()
abi_unstable = self.get_lit_bool('abi_unstable')
# Only add the ABI version when it is non-default.
# FIXME(EricWF): Get the ABI version from the "__config_site".
if abi_version and abi_version != '1':
self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_VERSION=' + abi_version]
if abi_unstable:
self.config.available_features.add('libcpp-abi-unstable')
self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_UNSTABLE']
def configure_filesystem_compile_flags(self):
if not self.get_lit_bool('enable_filesystem', default=True):
return
static_env = os.path.join(self.libcudacxx_src_root, 'test', 'std',
'input.output', 'filesystems', 'Inputs', 'static_test_env')
static_env = os.path.realpath(static_env)
assert os.path.isdir(static_env)
self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_STATIC_TEST_ROOT="%s"' % static_env]
dynamic_env = os.path.join(self.config.test_exec_root,
'filesystem', 'Output', 'dynamic_env')
dynamic_env = os.path.realpath(dynamic_env)
if not os.path.isdir(dynamic_env):
os.makedirs(dynamic_env)
self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT="%s"' % dynamic_env]
self.exec_env['LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT'] = ("%s" % dynamic_env)
dynamic_helper = os.path.join(self.libcudacxx_src_root, 'test', 'support',
'filesystem_dynamic_test_helper.py')
assert os.path.isfile(dynamic_helper)
self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_HELPER="%s %s"'
% (sys.executable, dynamic_helper)]
def configure_link_flags(self):
nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
if nvcc_host_compiler and self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-ccbin={0}'.format(nvcc_host_compiler)]
# Configure library path
self.configure_link_flags_cxx_library_path()
self.configure_link_flags_abi_library_path()
# Configure libraries
if self.cxx_stdlib_under_test == 'libc++':
if self.get_lit_conf('name') != 'libcu++':
if 'nvhpc' not in self.config.available_features or not self.cxx.is_nvrtc:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler']
self.cxx.link_flags += ['-nodefaultlibs']
# FIXME: Handle MSVCRT as part of the ABI library handling.
if self.is_windows and 'msvc' not in self.config.available_features:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler']
self.cxx.link_flags += ['-nostdlib']
self.configure_link_flags_cxx_library()
self.configure_link_flags_abi_library()
self.configure_extra_library_flags()
elif self.cxx_stdlib_under_test == 'libstdc++':
self.config.available_features.add('c++experimental')
self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
elif self.cxx_stdlib_under_test == 'msvc':
# FIXME: Correctly setup debug/release flags here.
pass
elif self.cxx_stdlib_under_test == 'cxx_default':
self.cxx.link_flags += ['-pthread']
else:
self.lit_config.fatal('invalid stdlib under test')
link_flags_str = self.get_lit_conf('link_flags', '')
self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
if not self.use_system_cxx_lib:
if self.cxx_library_root:
self.cxx.link_flags += ['-L' + self.cxx_library_root]
if self.is_windows and self.link_shared:
self.add_path(self.cxx.compile_env, self.cxx_library_root)
if self.cxx_runtime_root:
if not self.is_windows:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + self.cxx_runtime_root + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
self.cxx_runtime_root]
elif self.is_windows and self.link_shared:
self.add_path(self.exec_env, self.cxx_runtime_root)
elif os.path.isdir(str(self.use_system_cxx_lib)):
self.cxx.link_flags += ['-L' + self.use_system_cxx_lib]
if not self.is_windows:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + self.cxx_runtime_root + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
self.use_system_cxx_lib]
if self.is_windows and self.link_shared:
self.add_path(self.cxx.compile_env, self.use_system_cxx_lib)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
self.abi_library_root = self.get_lit_conf('abi_library_path')
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if not self.is_windows:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + self.cxx_runtime_root + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
self.abi_library_root]
else:
self.add_path(self.exec_env, self.abi_library_root)
def configure_link_flags_cxx_library(self):
libcxx_experimental = self.get_lit_bool('enable_experimental', default=False)
if libcxx_experimental:
self.config.available_features.add('c++experimental')
self.cxx.link_flags += ['-lc++experimental']
if self.link_shared:
self.cxx.link_flags += ['-lc++']
elif self.cxx.type != 'nvcc' and self.cxx.type != 'nvhpc':
cxx_library_root = self.get_lit_conf('cxx_library_root')
if cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(cxx_library_root, libname)
                assert os.path.exists(abs_path), \
                    "static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++']
def configure_link_flags_abi_library(self):
cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
if cxx_abi == 'libstdc++':
self.cxx.link_flags += ['-lstdc++']
elif cxx_abi == 'libsupc++':
self.cxx.link_flags += ['-lsupc++']
elif cxx_abi == 'libcxxabi':
# If the C++ library requires explicitly linking to libc++abi, or
# if we're testing libc++abi itself (the test configs are shared),
# then link it.
testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
if libcxxabi_shared:
self.cxx.link_flags += ['-lc++abi']
else:
cxxabi_library_root = self.get_lit_conf('abi_library_path')
if cxxabi_library_root:
libname = self.make_static_lib_name('c++abi')
abs_path = os.path.join(cxxabi_library_root, libname)
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++abi']
elif cxx_abi == 'libcxxrt':
self.cxx.link_flags += ['-lcxxrt']
elif cxx_abi == 'vcruntime':
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
['vcruntime', 'ucrt', 'msvcrt']]
elif cxx_abi == 'none' or cxx_abi == 'default':
if self.is_windows:
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
else:
self.lit_config.fatal(
'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_color_diagnostics(self):
use_color = self.get_lit_conf('color_diagnostics')
if use_color is None:
use_color = os.environ.get('LIBCXX_COLOR_DIAGNOSTICS')
if use_color is None:
return
if use_color != '':
self.lit_config.fatal('Invalid value for color_diagnostics "%s".'
% use_color)
color_flag = '-fdiagnostics-color=always'
# Check if the compiler supports the color diagnostics flag. Issue a
# warning if it does not since color diagnostics have been requested.
if not self.cxx.hasCompileFlag(color_flag):
self.lit_config.warning(
'color diagnostics have been requested but are not supported '
'by the compiler')
else:
self.cxx.flags += [color_flag]
def configure_debug_mode(self):
debug_level = self.get_lit_conf('debug_level', None)
if not debug_level:
return
if debug_level not in ['0', '1']:
self.lit_config.fatal('Invalid value for debug_level "%s".'
% debug_level)
self.cxx.compile_flags += ['-D_LIBCUDACXX_DEBUG=%s' % debug_level]
def configure_warnings(self):
default_enable_warnings = 'clang' in self.config.available_features or \
'msvc' in self.config.available_features or \
'nvcc' in self.config.available_features
enable_warnings = self.get_lit_bool('enable_warnings',
default_enable_warnings)
self.cxx.useWarnings(enable_warnings)
if 'nvcc' in self.config.available_features:
self.cxx.warning_flags += [ '-Xcudafe', '--display_error_number' ]
self.cxx.warning_flags += [ '-Werror', 'all-warnings' ]
if 'msvc' in self.config.available_features:
self.cxx.warning_flags += [ '-Xcompiler', '/W4', '-Xcompiler', '/WX' ]
# warning C4100: 'quack': unreferenced formal parameter
self.cxx.warning_flags += [ '-Xcompiler', '-wd4100' ]
# warning C4127: conditional expression is constant
self.cxx.warning_flags += [ '-Xcompiler', '-wd4127' ]
# warning C4180: qualifier applied to function type has no meaning; ignored
self.cxx.warning_flags += [ '-Xcompiler', '-wd4180' ]
# warning C4309: 'moo': truncation of constant value
self.cxx.warning_flags += [ '-Xcompiler', '-wd4309' ]
# warning C4996: deprecation warnings
self.cxx.warning_flags += [ '-Xcompiler', '-wd4996' ]
else:
# TODO: Re-enable soon.
def addIfHostSupports(flag):
if hasattr(self, 'host_cxx') and self.host_cxx.hasWarningFlag(flag):
self.cxx.warning_flags += [ '-Xcompiler', flag ]
addIfHostSupports('-Wall')
addIfHostSupports('-Wextra')
addIfHostSupports('-Werror')
addIfHostSupports('-Wno-literal-suffix') # GCC warning about reserved UDLs
addIfHostSupports('-Wno-user-defined-literals') # Clang warning about reserved UDLs
addIfHostSupports('-Wno-unused-parameter')
addIfHostSupports('-Wno-deprecated-declarations')
addIfHostSupports('-Wno-noexcept-type')
addIfHostSupports('-Wno-unused-function')
if 'gcc-4.8' in self.config.available_features:
# GCC pre-GCC5 spuriously generates these on reasonable aggregate initialization.
addIfHostSupports('-Wno-missing-field-initializers')
# TODO: port the warning disables from the non-NVCC path?
self.cxx.warning_flags += [ '-D_LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER' ]
pass
else:
self.cxx.warning_flags += [
'-D_LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER',
'-Wall', '-Wextra', '-Werror'
]
if self.cxx.hasWarningFlag('-Wuser-defined-warnings'):
self.cxx.warning_flags += ['-Wuser-defined-warnings']
self.config.available_features.add('diagnose-if-support')
self.cxx.addWarningFlagIfSupported('-Wshadow')
self.cxx.addWarningFlagIfSupported('-Wno-unused-command-line-argument')
self.cxx.addWarningFlagIfSupported('-Wno-attributes')
self.cxx.addWarningFlagIfSupported('-Wno-pessimizing-move')
self.cxx.addWarningFlagIfSupported('-Wno-c++11-extensions')
self.cxx.addWarningFlagIfSupported('-Wno-user-defined-literals')
self.cxx.addWarningFlagIfSupported('-Wno-noexcept-type')
self.cxx.addWarningFlagIfSupported('-Wno-aligned-allocation-unavailable')
# These warnings should be enabled in order to support the MSVC
# team using the test suite; They enable the warnings below and
# expect the test suite to be clean.
self.cxx.addWarningFlagIfSupported('-Wsign-compare')
self.cxx.addWarningFlagIfSupported('-Wunused-variable')
self.cxx.addWarningFlagIfSupported('-Wunused-parameter')
self.cxx.addWarningFlagIfSupported('-Wunreachable-code')
std = self.get_lit_conf('std', None)
if std in ['c++98', 'c++03']:
if 'nvcc' not in self.config.available_features:
# The '#define static_assert' provided by libc++ in C++03 mode
# causes an unused local typedef whenever it is used.
self.cxx.addWarningFlagIfSupported('-Wno-unused-local-typedef')
def configure_sanitizer(self):
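        # The 'use_sanitizer' value is e.g. 'Address', 'Undefined',
        # 'Address;Undefined', 'Memory', 'MemoryWithOrigins' or 'Thread';
        # see the branches below for the flags each one enables.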
san = self.get_lit_conf('use_sanitizer', '').strip()
if san:
self.target_info.add_sanitizer_features(san, self.config.available_features)
# Search for llvm-symbolizer along the compiler path first
# and then along the PATH env variable.
symbolizer_search_paths = os.environ.get('PATH', '')
cxx_path = libcudacxx.util.which(self.cxx.path)
if cxx_path is not None:
symbolizer_search_paths = (
os.path.dirname(cxx_path) +
os.pathsep + symbolizer_search_paths)
llvm_symbolizer = libcudacxx.util.which('llvm-symbolizer',
symbolizer_search_paths)
def add_ubsan():
self.cxx.flags += ['-fsanitize=undefined',
'-fno-sanitize=float-divide-by-zero',
'-fno-sanitize-recover=all']
self.exec_env['UBSAN_OPTIONS'] = 'print_stacktrace=1'
self.config.available_features.add('ubsan')
# Setup the sanitizer compile flags
self.cxx.flags += ['-g', '-fno-omit-frame-pointer']
if san == 'Address' or san == 'Address;Undefined' or san == 'Undefined;Address':
self.cxx.flags += ['-fsanitize=address']
if llvm_symbolizer is not None:
self.exec_env['ASAN_SYMBOLIZER_PATH'] = llvm_symbolizer
# FIXME: Turn ODR violation back on after PR28391 is resolved
# https://bugs.llvm.org/show_bug.cgi?id=28391
self.exec_env['ASAN_OPTIONS'] = 'detect_odr_violation=0'
self.config.available_features.add('asan')
self.config.available_features.add('sanitizer-new-delete')
self.cxx.compile_flags += ['-O1']
if san == 'Address;Undefined' or san == 'Undefined;Address':
add_ubsan()
elif san == 'Memory' or san == 'MemoryWithOrigins':
self.cxx.flags += ['-fsanitize=memory']
if san == 'MemoryWithOrigins':
self.cxx.compile_flags += [
'-fsanitize-memory-track-origins']
if llvm_symbolizer is not None:
self.exec_env['MSAN_SYMBOLIZER_PATH'] = llvm_symbolizer
self.config.available_features.add('msan')
self.config.available_features.add('sanitizer-new-delete')
self.cxx.compile_flags += ['-O1']
elif san == 'Undefined':
add_ubsan()
self.cxx.compile_flags += ['-O2']
elif san == 'Thread':
self.cxx.flags += ['-fsanitize=thread']
self.config.available_features.add('tsan')
self.config.available_features.add('sanitizer-new-delete')
else:
self.lit_config.fatal('unsupported value for '
'use_sanitizer: {0}'.format(san))
san_lib = self.get_lit_conf('sanitizer_library')
if san_lib:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + os.path.dirname(san_lib) + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
os.path.dirname(san_lib)]
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def configure_coroutines(self):
if self.cxx.hasCompileFlag('-fcoroutines-ts'):
macros = self._dump_macros_verbose(flags=['-fcoroutines-ts'])
if '__cpp_coroutines' not in macros:
self.lit_config.warning('-fcoroutines-ts is supported but '
'__cpp_coroutines is not defined')
# Consider coroutines supported only when the feature test macro
# reflects a recent value.
if intMacroValue(macros['__cpp_coroutines']) >= 201703:
self.config.available_features.add('fcoroutines-ts')
def configure_modules(self):
modules_flags = ['-fmodules']
if platform.system() != 'Darwin':
modules_flags += ['-Xclang', '-fmodules-local-submodule-visibility']
supports_modules = self.cxx.hasCompileFlag(modules_flags)
enable_modules = self.get_modules_enabled()
if enable_modules and not supports_modules:
self.lit_config.fatal(
'-fmodules is enabled but not supported by the compiler')
if not supports_modules:
return
self.config.available_features.add('modules-support')
module_cache = os.path.join(self.config.test_exec_root,
'modules.cache')
module_cache = os.path.realpath(module_cache)
if os.path.isdir(module_cache):
shutil.rmtree(module_cache)
os.makedirs(module_cache)
self.cxx.modules_flags += modules_flags + \
['-fmodules-cache-path=' + module_cache]
if enable_modules:
self.config.available_features.add('-fmodules')
self.cxx.useModules()
def configure_substitutions(self):
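        # The substitutions registered below are what RUN lines in the test
        # suite expand to. For example (illustrative only), a ShTest-style
        # test containing
        #     // RUN: %build
        #     // RUN: %run
        # is expanded to "<cxx> -o %t.exe %s <all flags>" followed by
        # running "%t.exe".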
sub = self.config.substitutions
cxx_path = pipes.quote(self.cxx.path)
# Configure compiler substitutions
sub.append(('%cxx', cxx_path))
sub.append(('%libcxx_src_root', self.libcudacxx_src_root))
# Configure flags substitutions
flags_str = ' '.join([pipes.quote(f) for f in self.cxx.flags])
compile_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.compile_flags])
link_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.link_flags])
all_flags = '%s %s %s' % (flags_str, compile_flags_str, link_flags_str)
sub.append(('%flags', flags_str))
sub.append(('%compile_flags', compile_flags_str))
sub.append(('%link_flags', link_flags_str))
sub.append(('%all_flags', all_flags))
if self.cxx.isVerifySupported():
verify_str = ' ' + ' '.join(self.cxx.verify_flags) + ' '
sub.append(('%verify', verify_str))
# Add compile and link shortcuts
compile_str = (cxx_path + ' -o %t.o %s -c ' + flags_str
+ ' ' + compile_flags_str)
link_str = (cxx_path + ' -o %t.exe %t.o ' + flags_str + ' '
+ link_flags_str)
assert type(link_str) is str
build_str = cxx_path + ' -o %t.exe %s ' + all_flags
if self.cxx.use_modules:
sub.append(('%compile_module', compile_str))
sub.append(('%build_module', build_str))
elif self.cxx.modules_flags is not None:
modules_str = ' '.join(self.cxx.modules_flags) + ' '
sub.append(('%compile_module', compile_str + ' ' + modules_str))
sub.append(('%build_module', build_str + ' ' + modules_str))
sub.append(('%compile', compile_str))
sub.append(('%link', link_str))
sub.append(('%build', build_str))
# Configure exec prefix substitutions.
# Configure run env substitution.
sub.append(('%run', '%t.exe'))
# Configure not program substitutions
not_py = os.path.join(self.libcudacxx_src_root, 'utils', 'not.py')
not_str = '%s %s ' % (pipes.quote(sys.executable), pipes.quote(not_py))
sub.append(('not ', not_str))
if self.get_lit_conf('libcudacxx_gdb'):
sub.append(('%libcxx_gdb', self.get_lit_conf('libcudacxx_gdb')))
def can_use_deployment(self):
# Check if the host is on an Apple platform using clang.
if not self.target_info.platform() == "darwin":
return False
if not self.target_info.is_host_macosx():
return False
if not self.cxx.type.endswith('clang'):
return False
return True
def configure_triple(self):
# Get or infer the target triple.
target_triple = self.get_lit_conf('target_triple')
self.use_target = self.get_lit_bool('use_target', False)
if self.use_target and target_triple:
self.lit_config.warning('use_target is true but no triple is specified')
# Use deployment if possible.
self.use_deployment = not self.use_target and self.can_use_deployment()
if self.use_deployment:
return
# Save the triple (and warn on Apple platforms).
self.config.target_triple = target_triple
if self.use_target and 'apple' in target_triple:
self.lit_config.warning('consider using arch and platform instead'
' of target_triple on Apple platforms')
# If no target triple was given, try to infer it from the compiler
# under test.
if not self.config.target_triple:
target_triple = (self.cxx if self.cxx.type != 'nvcc' else
self.host_cxx).getTriple()
# Drop sub-major version components from the triple, because the
# current XFAIL handling expects exact matches for feature checks.
# Example: x86_64-apple-darwin14.0.0 -> x86_64-apple-darwin14
# The 5th group handles triples greater than 3 parts
# (ex x86_64-pc-linux-gnu).
target_triple = re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)',
r'\1-\2-\3\5', target_triple)
# linux-gnu is needed in the triple to properly identify linuxes
# that use GLIBC. Handle redhat and opensuse triples as special
# cases and append the missing `-gnu` portion.
if (target_triple.endswith('redhat-linux') or
target_triple.endswith('suse-linux')):
target_triple += '-gnu'
self.config.target_triple = target_triple
self.lit_config.note(
"inferred target_triple as: %r" % self.config.target_triple)
def configure_deployment(self):
        assert self.use_deployment is not None
        assert self.use_target is not None
if not self.use_deployment:
# Warn about ignored parameters.
if self.get_lit_conf('arch'):
self.lit_config.warning('ignoring arch, using target_triple')
if self.get_lit_conf('platform'):
self.lit_config.warning('ignoring platform, using target_triple')
return
assert not self.use_target
assert self.target_info.is_host_macosx()
# Always specify deployment explicitly on Apple platforms, since
# otherwise a platform is picked up from the SDK. If the SDK version
# doesn't match the system version, tests that use the system library
# may fail spuriously.
arch = self.get_lit_conf('arch')
if not arch:
arch = (self.cxx if self.cxx.type != 'nvcc' else
self.host_cxx).getTriple().split('-', 1)[0]
self.lit_config.note("inferred arch as: %r" % arch)
inferred_platform, name, version = self.target_info.get_platform()
if inferred_platform:
self.lit_config.note("inferred platform as: %r" % (name + version))
self.config.deployment = (arch, name, version)
# Set the target triple for use by lit.
self.config.target_triple = arch + '-apple-' + name + version
self.lit_config.note(
"computed target_triple as: %r" % self.config.target_triple)
# If we're testing a system libc++ as opposed to the upstream LLVM one,
# take the version of the system libc++ into account to compute which
# features are enabled/disabled. Otherwise, disable availability markup,
# which is not relevant for non-shipped flavors of libc++.
if self.use_system_cxx_lib:
# Dylib support for shared_mutex was added in macosx10.12.
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 12)):
self.config.available_features.add('dylib-has-no-shared_mutex')
self.lit_config.note("shared_mutex is not supported by the deployment target")
# Throwing bad_optional_access, bad_variant_access and bad_any_cast is
# supported starting in macosx10.14.
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 14)):
self.config.available_features.add('dylib-has-no-bad_optional_access')
self.lit_config.note("throwing bad_optional_access is not supported by the deployment target")
self.config.available_features.add('dylib-has-no-bad_variant_access')
self.lit_config.note("throwing bad_variant_access is not supported by the deployment target")
self.config.available_features.add('dylib-has-no-bad_any_cast')
self.lit_config.note("throwing bad_any_cast is not supported by the deployment target")
            # Filesystem is supported on Apple platforms starting with macosx10.15.
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 15)):
self.config.available_features.add('dylib-has-no-filesystem')
self.lit_config.note("the deployment target does not support <filesystem>")
else:
self.cxx.flags += ['-D_LIBCUDACXX_DISABLE_AVAILABILITY']
def configure_env(self):
self.target_info.configure_env(self.exec_env)
def add_path(self, dest_env, new_path):
if 'PATH' not in dest_env:
dest_env['PATH'] = new_path
else:
split_char = ';' if self.is_windows else ':'
dest_env['PATH'] = '%s%s%s' % (new_path, split_char,
dest_env['PATH'])
| libcudacxx-main | .upstream-tests/utils/libcudacxx/test/config.py |
libcudacxx-main | .upstream-tests/utils/libcudacxx/test/__init__.py |
|
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import copy
import errno
import os
import time
import random
import lit.Test # pylint: disable=import-error
import lit.TestRunner # pylint: disable=import-error
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser \
# pylint: disable=import-error
from libcudacxx.test.executor import LocalExecutor as LocalExecutor
import libcudacxx.util
class LibcxxTestFormat(object):
"""
    Custom test format handler for use with the test format used by libc++.
    Tests fall into three categories:
FOO.pass.cpp - Executable test which should compile, run, and exit with
code 0.
FOO.fail.cpp - Negative test case which is expected to fail compilation.
FOO.sh.cpp - A test that uses LIT's ShTest format.
"""
def __init__(self, cxx, use_verify_for_fail, execute_external,
executor, exec_env):
self.cxx = copy.deepcopy(cxx)
self.use_verify_for_fail = use_verify_for_fail
self.execute_external = execute_external
self.executor = executor
self.exec_env = dict(exec_env)
@staticmethod
def _make_custom_parsers():
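        # These keywords are read from the test source itself; e.g.
        # (illustrative) a test may contain the line "// FLAKY_TEST." or
        # "// MODULES_DEFINES: _LIBCUDACXX_DEBUG=0".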
return [
IntegratedTestKeywordParser('FLAKY_TEST.', ParserKind.TAG,
initial_value=False),
IntegratedTestKeywordParser('MODULES_DEFINES:', ParserKind.LIST,
initial_value=[])
]
@staticmethod
def _get_parser(key, parsers):
for p in parsers:
if p.keyword == key:
return p
        assert False, "parser not found"
# TODO: Move this into lit's FileBasedTest
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
# Ignore dot files and excluded tests.
if filename.startswith('.') or filename in localConfig.excludes:
continue
filepath = os.path.join(source_path, filename)
if not os.path.isdir(filepath):
if any([filename.endswith(ext)
for ext in localConfig.suffixes]):
yield lit.Test.Test(testSuite, path_in_suite + (filename,),
localConfig)
def execute(self, test, lit_config):
while True:
try:
return self._execute(test, lit_config)
except OSError as oe:
if oe.errno != errno.ETXTBSY:
raise
time.sleep(0.1)
def _execute(self, test, lit_config):
name = test.path_in_suite[-1]
name_root, name_ext = os.path.splitext(name)
is_libcxx_test = test.path_in_suite[0] == 'libcxx'
is_sh_test = name_root.endswith('.sh')
is_pass_test = name.endswith('.pass.cpp') or name.endswith('.pass.mm')
is_fail_test = name.endswith('.fail.cpp') or name.endswith('.fail.mm')
is_objcxx_test = name.endswith('.mm')
is_objcxx_arc_test = name.endswith('.arc.pass.mm') or \
name.endswith('.arc.fail.mm')
assert is_sh_test or name_ext == '.cpp' or name_ext == '.mm', \
'non-cpp file must be sh test'
if test.config.unsupported:
return (lit.Test.UNSUPPORTED,
"A lit.local.cfg marked this unsupported")
if is_objcxx_test and not \
'objective-c++' in test.config.available_features:
return (lit.Test.UNSUPPORTED, "Objective-C++ is not supported")
parsers = self._make_custom_parsers()
script = lit.TestRunner.parseIntegratedTestScript(
test, additional_parsers=parsers, require_script=is_sh_test)
# Check if a result for the test was returned. If so return that
# result.
if isinstance(script, lit.Test.Result):
return script
if lit_config.noExecute:
return lit.Test.Result(lit.Test.PASS)
# Check that we don't have run lines on tests that don't support them.
if not is_sh_test and len(script) != 0:
lit_config.fatal('Unsupported RUN line found in test %s' % name)
tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir,
tmpBase)
script = lit.TestRunner.applySubstitutions(script, substitutions)
test_cxx = copy.deepcopy(self.cxx)
if is_fail_test:
test_cxx.useCCache(False)
test_cxx.useWarnings(False)
extra_modules_defines = self._get_parser('MODULES_DEFINES:',
parsers).getValue()
if '-fmodules' in test.config.available_features:
test_cxx.compile_flags += [('-D%s' % mdef.strip()) for
mdef in extra_modules_defines]
test_cxx.addWarningFlagIfSupported('-Wno-macro-redefined')
# FIXME: libc++ debug tests #define _LIBCUDACXX_ASSERT to override it
# If we see this we need to build the test against uniquely built
# modules.
if is_libcxx_test:
with open(test.getSourcePath(), 'rb') as f:
contents = f.read()
if b'#define _LIBCUDACXX_ASSERT' in contents:
test_cxx.useModules(False)
if is_objcxx_test:
test_cxx.source_lang = 'objective-c++'
if is_objcxx_arc_test:
test_cxx.compile_flags += ['-fobjc-arc']
else:
test_cxx.compile_flags += ['-fno-objc-arc']
test_cxx.link_flags += ['-framework', 'Foundation']
# Dispatch the test based on its suffix.
if is_sh_test:
if not isinstance(self.executor, LocalExecutor):
                # We can't run ShTest tests with an executor yet.
                # For now, bail on trying to run them.
return lit.Test.UNSUPPORTED, 'ShTest format not yet supported'
test.config.environment = dict(self.exec_env)
return lit.TestRunner._runShTest(test, lit_config,
self.execute_external, script,
tmpBase)
elif is_fail_test:
return self._evaluate_fail_test(test, test_cxx, parsers)
elif is_pass_test:
return self._evaluate_pass_test(test, tmpBase, lit_config,
test_cxx, parsers)
else:
# No other test type is supported
assert False
def _clean(self, exec_path): # pylint: disable=no-self-use
libcudacxx.util.cleanFile(exec_path)
def _evaluate_pass_test(self, test, tmpBase, lit_config,
test_cxx, parsers):
execDir = os.path.dirname(test.getExecPath())
source_path = test.getSourcePath()
exec_path = tmpBase + '.exe'
object_path = tmpBase + '.o'
# Create the output directory if it does not already exist.
libcudacxx.util.mkdir_p(os.path.dirname(tmpBase))
try:
# Compile the test
cmd, out, err, rc = test_cxx.compileLinkTwoSteps(
source_path, out=exec_path, object_file=object_path,
cwd=execDir)
compile_cmd = cmd
if rc != 0:
report = libcudacxx.util.makeReport(cmd, out, err, rc)
report += "Compilation failed unexpectedly!"
return lit.Test.Result(lit.Test.FAIL, report)
# Run the test
local_cwd = os.path.dirname(source_path)
env = None
if self.exec_env:
env = self.exec_env
# TODO: Only list actually needed files in file_deps.
# Right now we just mark all of the .dat files in the same
# directory as dependencies, but it's likely less than that. We
# should add a `// FILE-DEP: foo.dat` to each test to track this.
data_files = [os.path.join(local_cwd, f)
for f in os.listdir(local_cwd) if f.endswith('.dat')]
is_flaky = self._get_parser('FLAKY_TEST.', parsers).getValue()
max_retry = 3 if is_flaky else 1
            retry_count = 0
            while retry_count < max_retry:
cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
local_cwd, data_files,
env)
report = "Compiled With: '%s'\n" % ' '.join(compile_cmd)
report += libcudacxx.util.makeReport(cmd, out, err, rc)
if rc == 0:
res = lit.Test.PASS if retry_count == 0 else lit.Test.FLAKYPASS
return lit.Test.Result(res, report)
                # Devices are occasionally unavailable; allow a few extra
                # retries in that case to avoid false negatives.
                elif rc != 0 and "cudaErrorDevicesUnavailable" in out and max_retry <= 5:
                    max_retry += 1
                elif rc != 0 and retry_count + 1 >= max_retry:
                    report += "Compiled test failed unexpectedly!"
                    return lit.Test.Result(lit.Test.FAIL, report)
                retry_count += 1
assert False # Unreachable
finally:
# Note that cleanup of exec_file happens in `_clean()`. If you
            # override this, cleanup is your responsibility.
libcudacxx.util.cleanFile(object_path)
self._clean(exec_path)
def _evaluate_fail_test(self, test, test_cxx, parsers):
source_path = test.getSourcePath()
# FIXME: lift this detection into LLVM/LIT.
with open(source_path, 'rb') as f:
contents = f.read()
verify_tags = [b'expected-note', b'expected-remark',
b'expected-warning', b'expected-error',
b'expected-no-diagnostics']
use_verify = self.use_verify_for_fail and \
any([tag in contents for tag in verify_tags])
# FIXME(EricWF): GCC 5 does not evaluate static assertions that
        # are dependent on a template parameter when '-fsyntax-only' is passed.
# This is fixed in GCC 6. However for now we only pass "-fsyntax-only"
# when using Clang.
if test_cxx.type != 'gcc' and test_cxx.type != 'nvcc':
test_cxx.flags += ['-fsyntax-only']
if use_verify:
test_cxx.useVerify()
test_cxx.useWarnings()
if '-Wuser-defined-warnings' in test_cxx.warning_flags:
test_cxx.warning_flags += ['-Wno-error=user-defined-warnings']
else:
# We still need to enable certain warnings on .fail.cpp test when
# -verify isn't enabled. Such as -Werror=unused-result. However,
# we don't want it enabled too liberally, which might incorrectly
# allow unrelated failure tests to 'pass'.
#
# Therefore, we check if the test was expected to fail because of
# nodiscard before enabling it
test_str_list = [b'ignoring return value', b'nodiscard',
b'NODISCARD']
if any(test_str in contents for test_str in test_str_list):
test_cxx.flags += ['-Werror=unused-result']
cmd, out, err, rc = test_cxx.compile(source_path, out=os.devnull)
check_rc = lambda rc: rc == 0 if use_verify else rc != 0
report = libcudacxx.util.makeReport(cmd, out, err, rc)
if check_rc(rc):
return lit.Test.Result(lit.Test.PASS, report)
else:
report += ('Expected compilation to fail!\n' if not use_verify else
'Expected compilation using verify to pass!\n')
return lit.Test.Result(lit.Test.FAIL, report)
| libcudacxx-main | .upstream-tests/utils/libcudacxx/test/format.py |
from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
class GoogleBenchmark(TestFormat):
def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
self.benchmark_args = list(benchmark_args)
self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')
# On Windows, assume tests will also end in '.exe'.
exe_suffix = str(test_suffix)
if kIsWindows:
exe_suffix += '.exe'
# Also check for .py files for testing purposes.
self.test_suffixes = {exe_suffix, test_suffix + '.py'}
def getBenchmarkTests(self, path, litConfig, localConfig):
"""getBenchmarkTests(path) - [name]
        Return the benchmark tests available in a Google Benchmark executable.
Args:
path: String path to a gtest executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
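        # The --benchmark_list_tests output is parsed below; nested groups
        # are indicated by two-space indentation with the group name ending
        # in '.', e.g. (illustrative):
        #     BM_Group.
        #       Case/32
        #       Case/64
        # yields "BM_Group.Case/32" and "BM_Group.Case/64".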
# TODO: allow splitting tests according to the "benchmark family" so
# the output for a single family of tests all belongs to the same test
# target.
list_test_cmd = [path, '--benchmark_list_tests']
try:
output = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-benchmarks in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output))
            return  # Stop the generator; no benchmarks could be listed.
nested_tests = []
for ln in output.splitlines(False): # Don't keep newlines.
ln = lit.util.to_string(ln)
if not ln.strip():
continue
index = 0
while ln[index*2:index*2+2] == ' ':
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
if not os.path.isdir(dir_path):
continue
for fn in lit.util.listdir_files(dir_path,
suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(testSuite, testPath, localConfig,
file_path=execpath)
def execute(self, test, litConfig):
testPath,testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
# some '/'s.
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
cmd = [testPath, '--benchmark_filter=%s$' % testName ] + self.benchmark_args
if litConfig.noExecute:
return lit.Test.PASS, ''
try:
out, err, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime)
)
if exitCode:
return lit.Test.FAIL, ('exit code: %d\n' % exitCode) + out + err
passing_test_line = testName
if passing_test_line not in out:
msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
(passing_test_line, out, err))
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS, err + out
| libcudacxx-main | .upstream-tests/utils/libcudacxx/test/googlebenchmark.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import platform
import os
from libcudacxx.test import tracing
from libcudacxx.util import executeCommand
class Executor(object):
def run(self, exe_path, cmd, local_cwd, file_deps=None, env=None):
"""Execute a command.
Be very careful not to change shared state in this function.
Executor objects are shared between python processes in `lit -jN`.
Args:
exe_path: str: Local path to the executable to be run
cmd: [str]: subprocess.call style command
local_cwd: str: Local path to the working directory
file_deps: [str]: Files required by the test
env: {str: str}: Environment variables to execute under
Returns:
cmd, out, err, exitCode
"""
raise NotImplementedError
class LocalExecutor(Executor):
def __init__(self):
super(LocalExecutor, self).__init__()
self.is_windows = platform.system() == 'Windows'
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
cmd = cmd or [exe_path]
if work_dir == '.':
work_dir = os.getcwd()
out, err, rc = executeCommand(cmd, cwd=work_dir, env=env, timeout=self.timeout)
return (cmd, out, err, rc)
class NoopExecutor(Executor):
def __init__(self):
super(NoopExecutor, self).__init__()
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
return (cmd, '', '', 0)
class PrefixExecutor(Executor):
"""Prefix an executor with some other command wrapper.
    Most useful for setting ulimits on commands, or for running the command
    under an emulator like qemu or a tool like valgrind.
"""
def __init__(self, commandPrefix, chain):
super(PrefixExecutor, self).__init__()
self.commandPrefix = commandPrefix
self.chain = chain
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
cmd = cmd or [exe_path]
return self.chain.run(exe_path, self.commandPrefix + cmd, work_dir,
file_deps, env=env)
class PostfixExecutor(Executor):
"""Postfix an executor with some args."""
def __init__(self, commandPostfix, chain):
super(PostfixExecutor, self).__init__()
self.commandPostfix = commandPostfix
self.chain = chain
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
cmd = cmd or [exe_path]
        return self.chain.run(exe_path, cmd + self.commandPostfix, work_dir,
                              file_deps, env=env)
class TimeoutExecutor(PrefixExecutor):
"""Execute another action under a timeout.
Deprecated. http://reviews.llvm.org/D6584 adds timeouts to LIT.
"""
def __init__(self, duration, chain):
super(TimeoutExecutor, self).__init__(
['timeout', duration], chain)
class RemoteExecutor(Executor):
def __init__(self):
self.local_run = executeCommand
def remote_temp_dir(self):
return self._remote_temp(True)
def remote_temp_file(self):
return self._remote_temp(False)
def _remote_temp(self, is_dir):
raise NotImplementedError()
def copy_in(self, local_srcs, remote_dsts):
# This could be wrapped up in a tar->scp->untar for performance
# if there are lots of files to be copied/moved
for src, dst in zip(local_srcs, remote_dsts):
self._copy_in_file(src, dst)
def _copy_in_file(self, src, dst):
raise NotImplementedError()
def delete_remote(self, remote):
try:
self._execute_command_remote(['rm', '-rf', remote])
except OSError:
# TODO: Log failure to delete?
pass
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
target_exe_path = None
target_cwd = None
try:
target_cwd = self.remote_temp_dir()
target_exe_path = os.path.join(target_cwd, 'libcxx_test.exe')
if cmd:
# Replace exe_path with target_exe_path.
cmd = [c if c != exe_path else target_exe_path for c in cmd]
else:
cmd = [target_exe_path]
srcs = [exe_path]
dsts = [target_exe_path]
if file_deps is not None:
dev_paths = [os.path.join(target_cwd, os.path.basename(f))
for f in file_deps]
srcs.extend(file_deps)
dsts.extend(dev_paths)
self.copy_in(srcs, dsts)
# TODO(jroelofs): capture the copy_in and delete_remote commands,
# and conjugate them with '&&'s around the first tuple element
# returned here:
return self._execute_command_remote(cmd, target_cwd, env)
finally:
if target_cwd:
self.delete_remote(target_cwd)
def _execute_command_remote(self, cmd, remote_work_dir='.', env=None):
raise NotImplementedError()
class SSHExecutor(RemoteExecutor):
def __init__(self, host, username=None):
super(SSHExecutor, self).__init__()
self.user_prefix = username + '@' if username else ''
self.host = host
self.scp_command = 'scp'
self.ssh_command = 'ssh'
# TODO(jroelofs): switch this on some -super-verbose-debug config flag
if False:
self.local_run = tracing.trace_function(
self.local_run, log_calls=True, log_results=True,
label='ssh_local')
def _remote_temp(self, is_dir):
# TODO: detect what the target system is, and use the correct
# mktemp command for it. (linux and darwin differ here, and I'm
# sure windows has another way to do it)
# Not sure how to do suffix on osx yet
dir_arg = '-d' if is_dir else ''
cmd = 'mktemp -q {} /tmp/libcxx.XXXXXXXXXX'.format(dir_arg)
_, temp_path, err, exitCode = self._execute_command_remote([cmd])
temp_path = temp_path.strip()
if exitCode != 0:
raise RuntimeError(err)
return temp_path
def _copy_in_file(self, src, dst):
scp = self.scp_command
remote = self.host
remote = self.user_prefix + remote
cmd = [scp, '-p', src, remote + ':' + dst]
self.local_run(cmd)
def _execute_command_remote(self, cmd, remote_work_dir='.', env=None):
remote = self.user_prefix + self.host
ssh_cmd = [self.ssh_command, '-oBatchMode=yes', remote]
if env:
env_cmd = ['env'] + ['%s="%s"' % (k, v) for k, v in env.items()]
else:
env_cmd = []
remote_cmd = ' '.join(env_cmd + cmd)
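        # For example (illustrative), env={'FOO': '1'} and
        # cmd=['/tmp/libcxx.X/libcxx_test.exe'] produce the remote command
        # string: env FOO="1" /tmp/libcxx.X/libcxx_test.exe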
if remote_work_dir != '.':
remote_cmd = 'cd ' + remote_work_dir + ' && ' + remote_cmd
out, err, rc = self.local_run(ssh_cmd + [remote_cmd])
return (remote_cmd, out, err, rc)
| libcudacxx-main | .upstream-tests/utils/libcudacxx/test/executor.py |
#===----------------------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//
import importlib
import locale
import os
import platform
import re
import subprocess
import sys
from libcudacxx.util import executeCommand
class DefaultTargetInfo(object):
def __init__(self, full_config):
self.full_config = full_config
def platform(self):
return sys.platform.lower().strip()
def add_locale_features(self, features):
self.full_config.lit_config.warning(
"No locales entry for target_system: %s" % self.platform())
def add_cxx_compile_flags(self, flags): pass
def add_cxx_link_flags(self, flags): pass
def configure_env(self, env): pass
def allow_cxxabi_link(self): return True
def add_sanitizer_features(self, sanitizer_type, features): pass
def use_lit_shell_default(self): return False
def test_locale(loc):
assert loc is not None
default_locale = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, loc)
return True
except locale.Error:
return False
finally:
locale.setlocale(locale.LC_ALL, default_locale)
def add_common_locales(features, lit_config, is_windows=False):
# A list of locales needed by the test-suite.
# The list uses the canonical name for the locale used in the test-suite
    # TODO: On Linux, ISO8859 *may* need to be hyphenated.
locales = [
('en_US.UTF-8', 'English_United States.1252'),
('fr_FR.UTF-8', 'French_France.1252'),
('ru_RU.UTF-8', 'Russian_Russia.1251'),
('zh_CN.UTF-8', 'Chinese_China.936'),
('fr_CA.ISO8859-1', 'French_Canada.1252'),
('cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250')
]
for loc_id, windows_loc_name in locales:
loc_name = windows_loc_name if is_windows else loc_id
if test_locale(loc_name):
features.add('locale.{0}'.format(loc_id))
else:
lit_config.warning('The locale {0} is not supported by '
'your platform. Some tests will be '
'unsupported.'.format(loc_name))
class DarwinLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(DarwinLocalTI, self).__init__(full_config)
def is_host_macosx(self):
name = subprocess.check_output(['sw_vers', '-productName']).strip()
return name == "Mac OS X"
def get_macosx_version(self):
assert self.is_host_macosx()
version = subprocess.check_output(
['sw_vers', '-productVersion']).strip()
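        # Keep only the major.minor components, e.g. '10.14.6' -> '10.14'.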
version = re.sub(r'([0-9]+\.[0-9]+)(\..*)?', r'\1', version)
return version
def get_sdk_version(self, name):
assert self.is_host_macosx()
cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
try:
out = subprocess.check_output(cmd).strip()
except OSError:
pass
if not out:
self.full_config.lit_config.fatal(
"cannot infer sdk version with: %r" % cmd)
return re.sub(r'.*/[^0-9]+([0-9.]+)\.sdk', r'\1', out)
def get_platform(self):
platform = self.full_config.get_lit_conf('platform')
if platform:
platform = re.sub(r'([^0-9]+)([0-9\.]*)', r'\1-\2', platform)
name, version = tuple(platform.split('-', 1))
else:
name = 'macosx'
version = None
if version:
return (False, name, version)
# Infer the version, either from the SDK or the system itself. For
# macosx, ignore the SDK version; what matters is what's at
# /usr/lib/libc++.dylib.
if name == 'macosx':
version = self.get_macosx_version()
else:
version = self.get_sdk_version(name)
return (True, name, version)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_compile_flags(self, flags):
if self.full_config.use_deployment:
_, name, _ = self.full_config.config.deployment
cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
else:
cmd = ['xcrun', '--show-sdk-path']
out, err, exit_code = executeCommand(cmd)
if exit_code != 0:
self.full_config.lit_config.warning("Could not determine macOS SDK path! stderr was " + err)
if exit_code == 0 and out:
sdk_path = out.strip()
self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path)
assert isinstance(sdk_path, str)
flags += ["-isysroot", sdk_path]
def add_cxx_link_flags(self, flags):
flags += ['-lSystem']
def configure_env(self, env):
library_paths = []
# Configure the library path for libc++
if self.full_config.cxx_runtime_root:
library_paths += [self.full_config.cxx_runtime_root]
elif self.full_config.use_system_cxx_lib:
if (os.path.isdir(str(self.full_config.use_system_cxx_lib))):
library_paths += [self.full_config.use_system_cxx_lib]
# Configure the abi library path
if self.full_config.abi_library_root:
library_paths += [self.full_config.abi_library_root]
if library_paths:
env['DYLD_LIBRARY_PATH'] = ':'.join(library_paths)
def allow_cxxabi_link(self):
# Don't link libc++abi explicitly on OS X because the symbols
# should be available in libc++ directly.
return False
class FreeBSDLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(FreeBSDLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_link_flags(self, flags):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt']
class NetBSDLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(NetBSDLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_link_flags(self, flags):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lc++abi',
'-lunwind']
class LinuxLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(LinuxLocalTI, self).__init__(full_config)
def platform(self):
return 'linux'
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_compile_flags(self, flags):
flags += ['-D__STDC_FORMAT_MACROS',
'-D__STDC_LIMIT_MACROS',
'-D__STDC_CONSTANT_MACROS']
def add_cxx_link_flags(self, flags):
enable_threads = ('libcpp-has-no-threads' not in
self.full_config.config.available_features)
llvm_unwinder = self.full_config.get_lit_bool('llvm_unwinder', False)
shared_libcxx = self.full_config.get_lit_bool('enable_shared', True)
flags += ['-lm']
if not llvm_unwinder:
flags += ['-lgcc_s', '-lgcc']
if enable_threads:
flags += ['-lpthread']
if not shared_libcxx:
flags += ['-lrt']
flags += ['-lc']
if llvm_unwinder:
flags += ['-lunwind', '-ldl']
else:
flags += ['-lgcc_s']
builtins_lib = self.full_config.get_lit_conf('builtins_library')
if builtins_lib:
flags += [builtins_lib]
else:
flags += ['-lgcc']
use_libatomic = self.full_config.get_lit_bool('use_libatomic', False)
if use_libatomic:
flags += ['-latomic']
san = self.full_config.get_lit_conf('use_sanitizer', '').strip()
if san:
# The libraries and their order are taken from the
# linkSanitizerRuntimeDeps function in
# clang/lib/Driver/Tools.cpp
flags += ['-lpthread', '-lrt', '-lm', '-ldl']
class WindowsLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(WindowsLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config,
is_windows=True)
def use_lit_shell_default(self):
# Default to the internal shell on Windows, as bash on Windows is
# usually very slow.
return True
def make_target_info(full_config):
default = "libcudacxx.test.target_info.LocalTI"
info_str = full_config.get_lit_conf('target_info', default)
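    # A custom TargetInfo implementation can be selected by setting the
    # 'target_info' lit configuration value to a dotted path such as
    # (illustrative) 'libcudacxx.test.target_info.LinuxLocalTI'; the last
    # component names the class and the rest the module to import it from.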
if info_str != default:
mod_path, _, info = info_str.rpartition('.')
mod = importlib.import_module(mod_path)
target_info = getattr(mod, info)(full_config)
full_config.lit_config.note("inferred target_info as: %r" % info_str)
return target_info
target_system = platform.system()
if target_system == 'Darwin': return DarwinLocalTI(full_config)
if target_system == 'FreeBSD': return FreeBSDLocalTI(full_config)
if target_system == 'NetBSD': return NetBSDLocalTI(full_config)
if target_system == 'Linux': return LinuxLocalTI(full_config)
if target_system == 'Windows': return WindowsLocalTI(full_config)
return DefaultTargetInfo(full_config)
| libcudacxx-main | .upstream-tests/utils/libcudacxx/test/target_info.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import ast
import distutils.spawn
import sys
import re
import libcudacxx.util
from pprint import pformat
def read_syms_from_list(slist):
"""
Read a list of symbols from a list of strings.
Each string is one symbol.
"""
return [ast.literal_eval(l) for l in slist]
def read_syms_from_file(filename):
"""
Read a list of symbols in from a file.
"""
with open(filename, 'r') as f:
data = f.read()
return read_syms_from_list(data.splitlines())
def read_blacklist(filename):
with open(filename, 'r') as f:
data = f.read()
lines = [l.strip() for l in data.splitlines() if l.strip()]
lines = [l for l in lines if not l.startswith('#')]
return lines
def write_syms(sym_list, out=None, names_only=False, filter=None):
"""
Write a list of symbols to the file named by out.
"""
out_str = ''
out_list = sym_list
out_list.sort(key=lambda x: x['name'])
if filter is not None:
out_list = filter(out_list)
if names_only:
out_list = [sym['name'] for sym in out_list]
for sym in out_list:
# Use pformat for consistent ordering of keys.
out_str += pformat(sym, width=100000) + '\n'
if out is None:
sys.stdout.write(out_str)
else:
with open(out, 'w') as f:
f.write(out_str)
_cppfilt_exe = distutils.spawn.find_executable('c++filt')
def demangle_symbol(symbol):
if _cppfilt_exe is None:
return symbol
out, _, exit_code = libcudacxx.util.executeCommandVerbose(
[_cppfilt_exe], input=symbol)
if exit_code != 0:
return symbol
return out
def is_elf(filename):
with open(filename, 'rb') as f:
magic_bytes = f.read(4)
return magic_bytes == b'\x7fELF'
def is_mach_o(filename):
with open(filename, 'rb') as f:
magic_bytes = f.read(4)
return magic_bytes in [
        b'\xfe\xed\xfa\xce',  # MH_MAGIC
        b'\xce\xfa\xed\xfe',  # MH_CIGAM
        b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
        b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
        b'\xca\xfe\xba\xbe',  # FAT_MAGIC
        b'\xbe\xba\xfe\xca'   # FAT_CIGAM
]
def is_library_file(filename):
if sys.platform == 'darwin':
return is_mach_o(filename)
else:
return is_elf(filename)
def extract_or_load(filename):
import libcudacxx.sym_check.extract
if is_library_file(filename):
return libcudacxx.sym_check.extract.extract_symbols(filename)
return read_syms_from_file(filename)
def adjust_mangled_name(name):
if not name.startswith('__Z'):
return name
return name[1:]
new_delete_std_symbols = [
'_Znam',
'_Znwm',
'_ZdaPv',
'_ZdaPvm',
'_ZdlPv',
'_ZdlPvm'
]
cxxabi_symbols = [
'___dynamic_cast',
'___gxx_personality_v0',
'_ZTIDi',
'_ZTIDn',
'_ZTIDs',
'_ZTIPDi',
'_ZTIPDn',
'_ZTIPDs',
'_ZTIPKDi',
'_ZTIPKDn',
'_ZTIPKDs',
'_ZTIPKa',
'_ZTIPKb',
'_ZTIPKc',
'_ZTIPKd',
'_ZTIPKe',
'_ZTIPKf',
'_ZTIPKh',
'_ZTIPKi',
'_ZTIPKj',
'_ZTIPKl',
'_ZTIPKm',
'_ZTIPKs',
'_ZTIPKt',
'_ZTIPKv',
'_ZTIPKw',
'_ZTIPKx',
'_ZTIPKy',
'_ZTIPa',
'_ZTIPb',
'_ZTIPc',
'_ZTIPd',
'_ZTIPe',
'_ZTIPf',
'_ZTIPh',
'_ZTIPi',
'_ZTIPj',
'_ZTIPl',
'_ZTIPm',
'_ZTIPs',
'_ZTIPt',
'_ZTIPv',
'_ZTIPw',
'_ZTIPx',
'_ZTIPy',
'_ZTIa',
'_ZTIb',
'_ZTIc',
'_ZTId',
'_ZTIe',
'_ZTIf',
'_ZTIh',
'_ZTIi',
'_ZTIj',
'_ZTIl',
'_ZTIm',
'_ZTIs',
'_ZTIt',
'_ZTIv',
'_ZTIw',
'_ZTIx',
'_ZTIy',
'_ZTSDi',
'_ZTSDn',
'_ZTSDs',
'_ZTSPDi',
'_ZTSPDn',
'_ZTSPDs',
'_ZTSPKDi',
'_ZTSPKDn',
'_ZTSPKDs',
'_ZTSPKa',
'_ZTSPKb',
'_ZTSPKc',
'_ZTSPKd',
'_ZTSPKe',
'_ZTSPKf',
'_ZTSPKh',
'_ZTSPKi',
'_ZTSPKj',
'_ZTSPKl',
'_ZTSPKm',
'_ZTSPKs',
'_ZTSPKt',
'_ZTSPKv',
'_ZTSPKw',
'_ZTSPKx',
'_ZTSPKy',
'_ZTSPa',
'_ZTSPb',
'_ZTSPc',
'_ZTSPd',
'_ZTSPe',
'_ZTSPf',
'_ZTSPh',
'_ZTSPi',
'_ZTSPj',
'_ZTSPl',
'_ZTSPm',
'_ZTSPs',
'_ZTSPt',
'_ZTSPv',
'_ZTSPw',
'_ZTSPx',
'_ZTSPy',
'_ZTSa',
'_ZTSb',
'_ZTSc',
'_ZTSd',
'_ZTSe',
'_ZTSf',
'_ZTSh',
'_ZTSi',
'_ZTSj',
'_ZTSl',
'_ZTSm',
'_ZTSs',
'_ZTSt',
'_ZTSv',
'_ZTSw',
'_ZTSx',
'_ZTSy'
]
def is_stdlib_symbol_name(name, sym):
name = adjust_mangled_name(name)
if re.search("@GLIBC|@GCC", name):
# Only when symbol is defined do we consider it ours
return sym['is_defined']
if re.search('(St[0-9])|(__cxa)|(__cxxabi)', name):
return True
if name in new_delete_std_symbols:
return True
if name in cxxabi_symbols:
return True
if name.startswith('_Z'):
return True
return False
def filter_stdlib_symbols(syms):
stdlib_symbols = []
other_symbols = []
for s in syms:
canon_name = adjust_mangled_name(s['name'])
if not is_stdlib_symbol_name(canon_name, s):
other_symbols += [s]
else:
stdlib_symbols += [s]
return stdlib_symbols, other_symbols
| libcudacxx-main | .upstream-tests/utils/libcudacxx/sym_check/util.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""libcxx abi symbol checker"""
__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = ['diff', 'extract', 'util']
| libcudacxx-main | .upstream-tests/utils/libcudacxx/sym_check/__init__.py |
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
diff - A set of functions for diff-ing two symbol lists.
"""
from libcudacxx.sym_check import util
def _symbol_difference(lhs, rhs):
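    # Symbols are compared by their (name, type) pair; the entries of `lhs`
    # whose pair does not occur in `rhs` are returned.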
lhs_names = set(((n['name'], n['type']) for n in lhs))
rhs_names = set(((n['name'], n['type']) for n in rhs))
diff_names = lhs_names - rhs_names
return [n for n in lhs if (n['name'], n['type']) in diff_names]
def _find_by_key(sym_list, k):
for sym in sym_list:
if sym['name'] == k:
return sym
return None
def added_symbols(old, new):
return _symbol_difference(new, old)
def removed_symbols(old, new):
return _symbol_difference(old, new)
def changed_symbols(old, new):
changed = []
for old_sym in old:
if old_sym in new:
continue
new_sym = _find_by_key(new, old_sym['name'])
if (new_sym is not None and not new_sym in old
and old_sym != new_sym):
changed += [(old_sym, new_sym)]
return changed
def diff(old, new):
added = added_symbols(old, new)
removed = removed_symbols(old, new)
changed = changed_symbols(old, new)
return added, removed, changed
def report_diff(added_syms, removed_syms, changed_syms, names_only=False,
demangle=True):
def maybe_demangle(name):
return util.demangle_symbol(name) if demangle else name
report = ''
for sym in added_syms:
report += 'Symbol added: %s\n' % maybe_demangle(sym['name'])
if not names_only:
report += ' %s\n\n' % sym
if added_syms and names_only:
report += '\n'
for sym in removed_syms:
report += 'SYMBOL REMOVED: %s\n' % maybe_demangle(sym['name'])
if not names_only:
report += ' %s\n\n' % sym
if removed_syms and names_only:
report += '\n'
if not names_only:
for sym_pair in changed_syms:
old_sym, new_sym = sym_pair
old_str = '\n OLD SYMBOL: %s' % old_sym
new_str = '\n NEW SYMBOL: %s' % new_sym
report += ('SYMBOL CHANGED: %s%s%s\n\n' %
(maybe_demangle(old_sym['name']),
old_str, new_str))
added = bool(len(added_syms) != 0)
abi_break = bool(len(removed_syms))
if not names_only:
abi_break = abi_break or len(changed_syms)
if added or abi_break:
report += 'Summary\n'
report += ' Added: %d\n' % len(added_syms)
report += ' Removed: %d\n' % len(removed_syms)
if not names_only:
report += ' Changed: %d\n' % len(changed_syms)
if not abi_break:
report += 'Symbols added.'
else:
report += 'ABI BREAKAGE: SYMBOLS ADDED OR REMOVED!'
else:
report += 'Symbols match.'
is_different = abi_break or bool(len(added_syms)) \
or bool(len(changed_syms))
return report, abi_break, is_different
| libcudacxx-main | .upstream-tests/utils/libcudacxx/sym_check/diff.py |
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
extract - A set of function that extract symbol lists from shared libraries.
"""
import distutils.spawn
import os.path
import sys
import re
import libcudacxx.util
from libcudacxx.sym_check import util
extract_ignore_names = ['_init', '_fini']
class NMExtractor(object):
"""
NMExtractor - Extract symbol lists from libraries using nm.
"""
@staticmethod
def find_tool():
"""
Search for the nm executable and return the path.
"""
return distutils.spawn.find_executable('nm')
def __init__(self, static_lib):
"""
Initialize the nm executable and flags that will be used to extract
symbols from shared libraries.
"""
self.nm_exe = self.find_tool()
if self.nm_exe is None:
# ERROR no NM found
print("ERROR: Could not find nm")
sys.exit(1)
self.static_lib = static_lib
self.flags = ['-P', '-g']
def extract(self, lib):
"""
        Extract symbols from a library and return the results as a list of
        parsed symbol dicts.
"""
cmd = [self.nm_exe] + self.flags + [lib]
out, _, exit_code = libcudacxx.util.executeCommandVerbose(cmd)
if exit_code != 0:
raise RuntimeError('Failed to run %s on %s' % (self.nm_exe, lib))
fmt_syms = (self._extract_sym(l)
for l in out.splitlines() if l.strip())
# Cast symbol to string.
final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))
# Make unique and sort strings.
tmp_list = list(sorted(set(final_syms)))
# Cast string back to symbol.
return util.read_syms_from_list(tmp_list)
def _extract_sym(self, sym_str):
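        # Parses one line of `nm -P -g` output, which has the shape
        # "<name> <type> <value> <size>", e.g. (illustrative):
        #     _ZSt9terminatev T 0000000000012345 0000000000000010
        # Trailing columns may be absent for some symbol types.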
bits = sym_str.split()
# Everything we want has at least two columns.
if len(bits) < 2:
return None
new_sym = {
'name': bits[0],
'type': bits[1],
'is_defined': (bits[1].lower() != 'u')
}
new_sym['name'] = new_sym['name'].replace('@@', '@')
new_sym = self._transform_sym_type(new_sym)
# NM types which we want to save the size for.
if new_sym['type'] == 'OBJECT' and len(bits) > 3:
new_sym['size'] = int(bits[3], 16)
return new_sym
@staticmethod
def _want_sym(sym):
"""
Check that s is a valid symbol that we want to keep.
"""
if sym is None or len(sym) < 2:
return False
if sym['name'] in extract_ignore_names:
return False
bad_types = ['t', 'b', 'r', 'd', 'w']
return (sym['type'] not in bad_types
and sym['name'] not in ['__bss_start', '_end', '_edata'])
@staticmethod
def _transform_sym_type(sym):
"""
Map the nm single letter output for type to either FUNC or OBJECT.
If the type is not recognized it is left unchanged.
"""
func_types = ['T', 'W']
obj_types = ['B', 'D', 'R', 'V', 'S']
if sym['type'] in func_types:
sym['type'] = 'FUNC'
elif sym['type'] in obj_types:
sym['type'] = 'OBJECT'
return sym
class ReadElfExtractor(object):
"""
ReadElfExtractor - Extract symbol lists from libraries using readelf.
"""
@staticmethod
def find_tool():
"""
Search for the readelf executable and return the path.
"""
return distutils.spawn.find_executable('readelf')
def __init__(self, static_lib):
"""
Initialize the readelf executable and flags that will be used to
extract symbols from shared libraries.
"""
self.tool = self.find_tool()
if self.tool is None:
            # ERROR no readelf found
print("ERROR: Could not find readelf")
sys.exit(1)
# TODO: Support readelf for reading symbols from archives
        assert not static_lib, "ReadElf does not yet support static libs"
self.flags = ['--wide', '--symbols']
def extract(self, lib):
"""
        Extract symbols from a library and return the results as a list of
        parsed symbol dicts.
"""
cmd = [self.tool] + self.flags + [lib]
out, _, exit_code = libcudacxx.util.executeCommandVerbose(cmd)
if exit_code != 0:
            raise RuntimeError('Failed to run %s on %s' % (self.tool, lib))
dyn_syms = self.get_dynsym_table(out)
return self.process_syms(dyn_syms)
def process_syms(self, sym_list):
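        # Each entry is one line of the .dynsym table printed by
        # `readelf --wide --symbols`, e.g. (illustrative):
        #     10: 0000000000012345    40 OBJECT  GLOBAL DEFAULT   12 _ZTVSt9bad_alloc
        # which splits into the 8 whitespace-separated fields used below.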
new_syms = []
for s in sym_list:
parts = s.split()
if not parts:
continue
assert len(parts) == 7 or len(parts) == 8 or len(parts) == 9
if len(parts) == 7:
continue
new_sym = {
'name': parts[7],
'size': int(parts[2]),
'type': parts[3],
'is_defined': (parts[6] != 'UND')
}
assert new_sym['type'] in ['OBJECT', 'FUNC', 'NOTYPE', 'TLS']
if new_sym['name'] in extract_ignore_names:
continue
if new_sym['type'] == 'NOTYPE':
continue
if new_sym['type'] == 'FUNC':
del new_sym['size']
new_syms += [new_sym]
return new_syms
def get_dynsym_table(self, out):
lines = out.splitlines()
start = -1
end = -1
for i in range(len(lines)):
if lines[i].startswith("Symbol table '.dynsym'"):
start = i + 2
if start != -1 and end == -1 and not lines[i].strip():
end = i + 1
assert start != -1
if end == -1:
end = len(lines)
return lines[start:end]
def extract_symbols(lib_file, static_lib=None):
"""
Extract and return a list of symbols extracted from a static or dynamic
    library. The symbols are extracted using nm or readelf. They are then
    filtered and formatted. Finally the symbols are made unique.
"""
if static_lib is None:
_, ext = os.path.splitext(lib_file)
static_lib = True if ext in ['.a'] else False
if ReadElfExtractor.find_tool() and not static_lib:
extractor = ReadElfExtractor(static_lib=static_lib)
else:
extractor = NMExtractor(static_lib=static_lib)
return extractor.extract(lib_file)
| libcudacxx-main | .upstream-tests/utils/libcudacxx/sym_check/extract.py |
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
match - A set of functions for matching symbols in a list to a list of regexs
"""
import re
def find_and_report_matching(symbol_list, regex_list):
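    # Illustrative use: regex_list=['_ZSt.*'] reports every symbol whose
    # mangled name starts with '_ZSt' and returns how many matched.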
report = ''
found_count = 0
for regex_str in regex_list:
report += 'Matching regex "%s":\n' % regex_str
matching_list = find_matching_symbols(symbol_list, regex_str)
if not matching_list:
report += ' No matches found\n\n'
continue
# else
found_count += len(matching_list)
for m in matching_list:
report += ' MATCHES: %s\n' % m['name']
report += '\n'
return found_count, report
def find_matching_symbols(symbol_list, regex_str):
regex = re.compile(regex_str)
matching_list = []
for s in symbol_list:
if regex.match(s['name']):
matching_list += [s]
return matching_list
| libcudacxx-main | .upstream-tests/utils/libcudacxx/sym_check/match.py |
# -*- Python -*- vim: set ft=python ts=4 sw=4 expandtab tw=79:
# Configuration file for the 'lit' test runner.
import os
import site
site.addsitedir(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'utils'))
from libcxx.test.googlebenchmark import GoogleBenchmark
# Tell pylint that we know config and lit_config exist somewhere.
if 'PYLINT_IMPORT' in os.environ:
config = object()
lit_config = object()
# name: The name of this test suite.
config.name = 'libc++ benchmarks'
config.suffixes = []
config.test_exec_root = os.path.join(config.libcxx_obj_root, 'benchmarks')
config.test_source_root = config.test_exec_root
config.test_format = GoogleBenchmark(test_sub_dirs='.',
test_suffix='.libcxx.out',
benchmark_args=config.benchmark_args) | libcudacxx-main | .upstream-tests/benchmarks/lit.cfg.py |
# All the Lit configuration is handled in the site configs -- this file is only
# left as a canary to catch invocations of Lit that do not go through llvm-lit.
#
# Invocations that go through llvm-lit will automatically use the right Lit
# site configuration inside the build directory.
lit_config.fatal(
"You seem to be running Lit directly -- you should be running Lit through "
"<build>/bin/llvm-lit, which will ensure that the right Lit configuration "
"file is used.")
| libcudacxx-main | libcxxabi/test/lit.cfg.py |
libcudacxx-main | libcxxabi/test/libcxxabi/__init__.py |
|
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import sys
from libcxx.test.config import Configuration as LibcxxConfiguration
from libcxx.test.config import intMacroValue
class Configuration(LibcxxConfiguration):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
super(Configuration, self).__init__(lit_config, config)
self.libcxxabi_src_root = None
self.libcxxabi_obj_root = None
self.abi_library_path = None
self.libcxx_src_root = None
def configure_src_root(self):
self.libcxxabi_src_root = self.get_lit_conf(
'libcxxabi_src_root',
os.path.dirname(self.config.test_source_root))
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root',
            os.path.join(self.libcxxabi_src_root, '..', 'libcxx'))
def configure_obj_root(self):
self.libcxxabi_obj_root = self.get_lit_conf('libcxxabi_obj_root')
super(Configuration, self).configure_obj_root()
def has_cpp_feature(self, feature, required_value):
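        # e.g. has_cpp_feature('noexcept_function_type', 201510) is true when
        # the compiler defines __cpp_noexcept_function_type >= 201510.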
return intMacroValue(self.cxx.dumpMacros().get('__cpp_' + feature, '0')) >= required_value
def configure_features(self):
super(Configuration, self).configure_features()
if not self.has_cpp_feature('noexcept_function_type', 201510):
self.config.available_features.add('libcxxabi-no-noexcept-function-type')
if not self.get_lit_bool('llvm_unwinder', False):
self.config.available_features.add('libcxxabi-has-system-unwinder')
def configure_compile_flags(self):
self.cxx.compile_flags += [
'-DLIBCXXABI_NO_TIMER',
'-D_LIBCUDACXX_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS',
]
if self.get_lit_bool('enable_exceptions', True):
self.cxx.compile_flags += ['-funwind-tables']
if not self.get_lit_bool('enable_threads', True):
self.cxx.compile_flags += ['-D_LIBCXXABI_HAS_NO_THREADS']
self.config.available_features.add('libcxxabi-no-threads')
super(Configuration, self).configure_compile_flags()
def configure_compile_flags_header_includes(self):
self.configure_config_site_header()
cxx_headers = self.get_lit_conf(
'cxx_headers',
            os.path.join(self.libcxx_src_root, 'include'))
if cxx_headers == '':
            self.lit_config.note('using the system C++ headers')
else:
self.cxx.compile_flags += ['-nostdinc++']
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='%s' is not a directory."
% cxx_headers)
self.cxx.compile_flags += ['-I' + cxx_headers]
libcxxabi_headers = self.get_lit_conf(
'libcxxabi_headers',
os.path.join(self.libcxxabi_src_root, 'include'))
if not os.path.isdir(libcxxabi_headers):
self.lit_config.fatal("libcxxabi_headers='%s' is not a directory."
% libcxxabi_headers)
self.cxx.compile_flags += ['-I' + libcxxabi_headers]
libunwind_headers = self.get_lit_conf('libunwind_headers', None)
if self.get_lit_bool('llvm_unwinder', False) and libunwind_headers:
if not os.path.isdir(libunwind_headers):
self.lit_config.fatal("libunwind_headers='%s' is not a directory."
% libunwind_headers)
self.cxx.compile_flags += ['-I' + libunwind_headers]
| libcudacxx-main | libcxxabi/test/libcxxabi/test/config.py |
libcudacxx-main | libcxxabi/test/libcxxabi/test/__init__.py |
|
# All the Lit configuration is handled in the site configs -- this file is only
# left as a canary to catch invocations of Lit that do not go through llvm-lit.
#
# Invocations that go through llvm-lit will automatically use the right Lit
# site configuration inside the build directory.
lit_config.fatal(
"You seem to be running Lit directly -- you should be running Lit through "
"<build>/bin/llvm-lit, which will ensure that the right Lit configuration "
"file is used.")
| libcudacxx-main | libunwind/test/lit.cfg.py |
libcudacxx-main | libunwind/test/libunwind/__init__.py |
|
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import sys
from libcxx.test.config import Configuration as LibcxxConfiguration
class Configuration(LibcxxConfiguration):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
super(Configuration, self).__init__(lit_config, config)
self.libunwind_src_root = None
self.libunwind_obj_root = None
self.abi_library_path = None
self.libcxx_src_root = None
def configure_src_root(self):
self.libunwind_src_root = (self.get_lit_conf('libunwind_src_root')
or os.path.dirname(self.config.test_source_root))
self.libcxx_src_root = (self.get_lit_conf('libcxx_src_root')
or os.path.join(self.libunwind_src_root, '..', 'libcxx'))
def configure_obj_root(self):
self.libunwind_obj_root = self.get_lit_conf('libunwind_obj_root')
super(Configuration, self).configure_obj_root()
def has_cpp_feature(self, feature, required_value):
return int(self.cxx.dumpMacros().get('__cpp_' + feature, 0)) >= required_value
def configure_features(self):
super(Configuration, self).configure_features()
if self.get_lit_bool('arm_ehabi', False):
self.config.available_features.add('libunwind-arm-ehabi')
def configure_compile_flags(self):
self.cxx.compile_flags += ['-DLIBUNWIND_NO_TIMER']
# Stack unwinding tests need unwinding tables and these are not
# generated by default on all Targets.
self.cxx.compile_flags += ['-funwind-tables']
if not self.get_lit_bool('enable_threads', True):
self.cxx.compile_flags += ['-D_LIBUNWIND_HAS_NO_THREADS']
self.config.available_features.add('libunwind-no-threads')
super(Configuration, self).configure_compile_flags()
def configure_compile_flags_header_includes(self):
self.configure_config_site_header()
libunwind_headers = self.get_lit_conf(
'libunwind_headers',
os.path.join(self.libunwind_src_root, 'include'))
if not os.path.isdir(libunwind_headers):
self.lit_config.fatal("libunwind_headers='%s' is not a directory."
% libunwind_headers)
self.cxx.compile_flags += ['-I' + libunwind_headers]
def configure_link_flags_cxx_library(self):
# libunwind tests should not link with libc++
pass
def configure_link_flags_abi_library(self):
# libunwind tests should not link with libc++abi
pass
| libcudacxx-main | libunwind/test/libunwind/test/config.py |
libcudacxx-main | libunwind/test/libunwind/test/__init__.py |
|
# -*- coding: utf-8 -*-
#
# libunwind documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libunwind'
copyright = u'2011-%d, LLVM Project' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '12.0'
# The full version, including alpha/beta/rc tags.
release = '12.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libunwinddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'libunwind.tex', u'libunwind Documentation',
u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'libunwind', u'libunwind Documentation',
[u'LLVM project'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('contents', 'libunwind', u'libunwind Documentation',
u'LLVM project', 'libunwind', 'LLVM Unwinder',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}
# -- Options for extensions ----------------------------------------------------
# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True
| libcudacxx-main | libunwind/docs/conf.py |
import argparse
import logging as log
import os
import time
from math import ceil, floor
from tensorboardX import SummaryWriter
import torch
import torch.distributed as dist
import torch.optim as optim
import torch.utils.data.distributed
from torch.multiprocessing import Process
from torch.autograd import Variable
from dataloading.dataloaders import get_loader
from model.model import VSRNet
from model.clr import cyclic_learning_rate
from nvidia.fp16 import FP16_Optimizer
from nvidia.fp16util import network_to_half
from nvidia.distributed import DistributedDataParallel
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed')
parser.add_argument('--root', type=str, default='.',
help='input data root folder')
parser.add_argument('--frames', type=int, default = 3,
help='num frames in input sequence')
parser.add_argument('--is_cropped', action='store_true',
help='crop input frames?')
parser.add_argument('--crop_size', type=int, nargs='+', default=[256, 256],
help='[height, width] for input crop')
parser.add_argument('--batchsize', type=int, default=1,
help='per rank batch size')
parser.add_argument('--loader', type=str, default='NVVL',
help='dataloader: pytorch or NVVL')
parser.add_argument('--rank', type=int, default=0,
help='pytorch distributed rank')
parser.add_argument('--world_size', default=2, type=int, metavar='N',
help='num processes for pytorch distributed')
parser.add_argument('--ip', default='localhost', type=str,
help='IP address for distributed init.')
parser.add_argument('--max_iter', type=int, default=1000,
help='num training iters')
parser.add_argument('--fp16', action='store_true',
help='train in fp16?')
parser.add_argument('--checkpoint_dir', type=str, default='.',
help='where to save checkpoints')
parser.add_argument('--min_lr', type=float, default=0.000001,
help='min learning rate for cyclic learning rate')
parser.add_argument('--max_lr', type=float, default=0.00001,
help='max learning rate for cyclic learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0004,
help='ADAM weight decay')
parser.add_argument('--flownet_path', type=str,
default='flownet2-pytorch/networks/FlowNet2-SD_checkpoint.pth.tar',
help='FlowNetSD weights path')
parser.add_argument('--image_freq', type=int, default=100,
help='num iterations between image dumps to Tensorboard ')
parser.add_argument('--timing', action='store_true',
help="Time data loading and model training (default: False)")
def main(args):
if args.rank == 0:
log.basicConfig(level=log.INFO)
writer = SummaryWriter()
writer.add_text('config', str(args))
else:
log.basicConfig(level=log.WARNING)
writer = None
torch.cuda.set_device(args.rank % args.world_size)
torch.manual_seed(args.seed + args.rank)
torch.cuda.manual_seed(args.seed + args.rank)
torch.backends.cudnn.benchmark = True
log.info('Initializing process group')
dist.init_process_group(
backend='nccl',
init_method='tcp://' + args.ip + ':3567',
world_size=args.world_size,
rank=args.rank)
log.info('Process group initialized')
log.info("Initializing dataloader...")
train_loader, train_batches, val_loader, val_batches, sampler = get_loader(args)
samples_per_epoch = train_batches * args.batchsize
log.info('Dataloader initialized')
model = VSRNet(args.frames, args.flownet_path, args.fp16)
if args.fp16:
network_to_half(model)
model.cuda()
model.train()
for param in model.FlowNetSD_network.parameters():
param.requires_grad = False
model_params = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.Adam(model_params, lr=1, weight_decay=args.weight_decay)
#optimizer = optim.SGD(model_params, lr=1,
# momentum=0.99, weight_decay=args.weight_decay)
stepsize = 2 * train_batches
clr_lambda = cyclic_learning_rate(args.min_lr, args.max_lr, stepsize)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[clr_lambda])
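    # The optimizer is constructed with lr=1, so the multiplier returned by
    # the cyclic_learning_rate lambda acts as the effective learning rate.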
if args.fp16:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
model = DistributedDataParallel(model)
# BEGIN TRAINING
total_iter = 0
while total_iter * args.world_size < args.max_iter:
epoch = floor(total_iter / train_batches)
if args.loader == 'pytorch':
sampler.set_epoch(epoch)
model.train()
total_epoch_loss = 0.0
sample_timer = 0.0
data_timer = 0.0
compute_timer = 0.0
iter_start = time.perf_counter()
# TRAINING EPOCH LOOP
for i, inputs in enumerate(train_loader):
if args.loader == 'NVVL':
inputs = inputs['input']
else:
inputs = inputs.cuda(non_blocking=True)
if args.fp16:
inputs = inputs.half()
if args.timing:
torch.cuda.synchronize()
data_end = time.perf_counter()
optimizer.zero_grad()
im_out = total_iter % args.image_freq == 0
loss = model(Variable(inputs), i, writer, im_out)
total_epoch_loss += loss.item()
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
optimizer.step()
scheduler.step()
if args.rank == 0:
if args.timing:
torch.cuda.synchronize()
iter_end = time.perf_counter()
sample_timer += (iter_end - iter_start)
data_timer += (data_end - iter_start)
compute_timer += (iter_end - data_end)
torch.cuda.synchronize()
iter_start = time.perf_counter()
writer.add_scalar('learning_rate', scheduler.get_lr()[0], total_iter)
writer.add_scalar('train_loss', loss.item(), total_iter)
log.info('Rank %d, Epoch %d, Iteration %d of %d, loss %.5f' %
(dist.get_rank(), epoch, i+1, train_batches, loss.item()))
total_iter += 1
if args.rank == 0:
if args.timing:
sample_timer_avg = sample_timer / samples_per_epoch
writer.add_scalar('sample_time', sample_timer_avg, total_iter)
data_timer_avg = data_timer / samples_per_epoch
writer.add_scalar('sample_data_time', data_timer_avg, total_iter)
compute_timer_avg = compute_timer / samples_per_epoch
writer.add_scalar('sample_compute_time', compute_timer_avg, total_iter)
epoch_loss_avg = total_epoch_loss / train_batches
log.info('Rank %d, epoch %d: %.5f' % (dist.get_rank(), epoch, epoch_loss_avg))
model.eval()
total_loss = 0
total_psnr = 0
for i, inputs in enumerate(val_loader):
if args.loader == 'NVVL':
inputs = inputs['input']
else:
inputs = inputs.cuda(non_blocking=True)
if args.fp16:
inputs = inputs.half()
log.info('Validation it %d of %d' % (i + 1, val_batches))
loss, psnr = model(Variable(inputs), i, None)
total_loss += loss.item()
total_psnr += psnr.item()
        # average over the number of validation batches (enumerate is 0-based)
        loss = total_loss / (i + 1)
        psnr = total_psnr / (i + 1)
if args.rank == 0:
writer.add_scalar('val_loss', loss, total_iter)
writer.add_scalar('val_psnr', psnr, total_iter)
log.info('Rank %d validation loss %.5f' % (dist.get_rank(), loss))
log.info('Rank %d validation psnr %.5f' % (dist.get_rank(), psnr))
if __name__=='__main__':
main(parser.parse_args())
| nvvl-master | examples/pytorch_superres/main.py |
import argparse
import os
import subprocess
def split_scenes(raw_data_path, out_data_path):
out_data_path = os.path.join(out_data_path,'orig','scenes')
if not os.path.isdir(os.path.join(out_data_path,'train')):
os.makedirs(os.path.join(out_data_path,'train'))
if not os.path.isdir(os.path.join(out_data_path,'val')):
os.makedirs(os.path.join(out_data_path,'val'))
start = "00:00:00.0"
with open("./data/timestamps") as f:
for i, line in enumerate(f.readlines()):
m, s = divmod(float(line), 60)
h, m = divmod(m, 60)
end = "%02d:%02d:%02d" %(h, m, s)
if i < 53:
subset = 'train'
else:
subset = 'val'
filepath = os.path.join(out_data_path, subset)
filename = os.path.join(filepath, 'scene_' + str(i) + '.mp4')
cmd = ["ffmpeg", "-i", raw_data_path, "-ss", start, "-to", end,
"-c:v", "copy", "-an", filename]
print("Running: ", ' '.join(cmd))
subprocess.run(cmd)
start = end
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--raw_data', type=str, default=None)
parser.add_argument('--out_data', type=str, default=None)
args = parser.parse_args()
assert args.raw_data is not None, 'Provide --raw_data path to Myanmar 4K mp4'
    assert args.out_data is not None, 'Provide --out_data path for the output data directory'
split_scenes(args.raw_data, args.out_data)
| nvvl-master | examples/pytorch_superres/tools/split_scenes.py |
import argparse
import os
import subprocess
default_format = "png"
default_qscale_jpg = "4"
def extract_frames(master_data, resolution, format, q, quiet,
transcoded, codec, crf, keyint):
if transcoded:
desc = [resolution, 'scenes']
desc += [codec] if codec else []
desc += ["crf"+crf] if crf else []
desc += ["keyint"+keyint] if keyint else []
in_path = os.path.join(master_data,*desc)
else:
if codec:
raise ValueError("--codec specified, but not --transcoded");
if crf:
raise ValueError("--crf specified, but not --transcoded");
if keyint:
raise ValueError("--keyint specified, but not --transcoded");
in_path = os.path.join(master_data,'orig','scenes')
desc = [resolution,'frames']
desc += [codec] if codec else []
desc += ["crf"+crf] if crf else []
desc += ["keyint"+keyint] if keyint else []
if not format:
format = default_format
else:
desc += [format]
if not q:
if format == "jpg":
q = default_qscale_jpg
else:
desc += ["q" + q]
out_path = os.path.join(master_data,*desc)
res_args = []
if resolution == '4K':
pass
else:
if resolution == '1080p':
res_str = '1920:1080'
elif resolution == '720p':
res_str = '1280:720'
elif resolution == '540p':
res_str = '960:540'
else:
raise ValueError("Unknown resolution")
res_args += ["-vf", "scale=%s" % res_str, "-sws_flags", "bilinear"]
codec_args = []
if format == "png":
if q:
codec_args += ["-compression_level", q]
elif format == "jpg":
codec_args += ["-q:v", q]
else:
raise ValueError("Unknown format")
if quiet:
cmdout = subprocess.DEVNULL
else:
cmdout = None
for subset_name, subset_dir in [('training', 'train'), ('validation', 'val')]:
if not os.path.exists(os.path.join(in_path,subset_dir)):
raise ValueError("No "+subset_name+" data found in "+in_path+", " +
"did you run split_scenes.py?")
for in_file in os.listdir(os.path.join(in_path,subset_dir)):
if in_file.endswith('.mp4'):
scene = in_file.split('_')[1].split('.')[0]
cur_out_path = os.path.join(out_path,subset_dir,scene)
if not os.path.isdir(cur_out_path):
os.makedirs(cur_out_path)
cur_in_path = os.path.join(in_path,subset_dir,in_file)
cmd = ["ffmpeg", "-n", "-i", cur_in_path]
cmd += res_args
cmd += codec_args
cmd += [os.path.join(cur_out_path, "%05d."+format)]
print("Running:", " ".join(cmd))
subprocess.run(cmd, stdout=cmdout, stderr=cmdout)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--master_data', type=str, required=True,
help="Path to root data directory")
parser.add_argument('--resolution', type=str, required=True,
choices=['4K', '1080p', '720p', '540p'])
parser.add_argument('--format', type=str,
choices=['png', 'jpg'])
parser.add_argument('-q', type=str,
help="quality to use for compression [2-31] for jpg and [0-9] for png")
parser.add_argument('--transcoded', action='store_true',
help="Use transcoded videos instead of original split video")
parser.add_argument('--quiet', action='store_true',
help="Suppress ffmpeg output")
parser.add_argument('--codec', type=str, default=None,
choices=['h264', 'hevc'],
help="codec of transcoded video to use")
parser.add_argument('--crf', type=str, default=None,
help="crf value of transcoded video to use")
parser.add_argument('--keyint', type=str, default=None,
help="keyframe interval of transcoded video to use")
args = parser.parse_args()
extract_frames(**vars(args))
| nvvl-master | examples/pytorch_superres/tools/extract_frames.py |
import argparse
import os
import subprocess
default_codec = "h264"
default_crf = "18"
default_keyint = "4"
def downsample_scenes(master_data, resolution, codec, crf, keyint, quiet):
desc = [resolution, 'scenes']
if not codec:
codec = default_codec
else:
desc += [codec]
assert codec in ['h264', 'hevc'], '--codec must be one of h264 or hevc'
if not crf:
crf = default_crf
else:
desc += ["crf" + crf]
if not keyint:
keyint = default_keyint
else:
desc += ["keyint" + keyint]
master_out_path = os.path.join(master_data,*desc)
print("Writing output files to:", master_out_path)
for subset in ['train', 'val']:
if not os.path.isdir(os.path.join(master_out_path,subset)):
os.makedirs(os.path.join(master_out_path,subset))
res_args = []
if resolution == '4K':
pass
else:
if resolution == '1080p':
res_str = '1920:1080'
elif resolution == '720p':
res_str = '1280:720'
elif resolution == '540p':
res_str = '960:540'
else:
raise ValueError("Unknown resolution")
res_args = ["-vf", "scale=%s" % res_str, "-sws_flags", "bilinear"]
codec_args = ["-preset", "slow"]
if codec == 'h264':
codec_args = ["-c:v", "libx264", "-g", keyint,
"-profile:v", "high"]
elif codec == 'hevc' or codec == 'h265':
codec_args = ["-c:v", "libx265", "-x265-params",
"keyint=%s:no-open-gop=1" % (keyint)]
else:
raise ValueError("Unknown codec")
if quiet:
cmdout = subprocess.DEVNULL
else:
cmdout = None
def transcode(in_path, out_path):
cmd = ["ffmpeg", "-y", "-i", in_path]
cmd += res_args
cmd += codec_args
cmd += ["-crf", crf, "-an", out_path]
print("Running:", " ".join(cmd))
subprocess.run(cmd, stdout=cmdout, stderr=cmdout)
for subset in ['train', 'val']:
for in_file in os.listdir(os.path.join(master_data,'orig','scenes',subset)):
if in_file.endswith('.mp4'):
in_path = os.path.join(master_data,'orig','scenes',subset,in_file)
out_path = os.path.join(master_out_path,subset,in_file)
transcode(in_path, out_path)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--master_data', type=str, default=None,
help="Path to root data directory")
parser.add_argument('--resolution', type=str, default=None,
help="one of '4K', '1080p', '720p', or '540p'")
parser.add_argument('--codec', type=str, default=None,
help="one of 'h264' or 'hevc'")
parser.add_argument('--crf', type=str, default=None,
help="crf value passed to ffmpeg")
parser.add_argument('--keyint', type=str, default=None,
help="keyframe interval")
parser.add_argument('--quiet', action='store_true',
help="Suppress ffmpeg output")
args = parser.parse_args()
assert args.master_data is not None, 'Provide --master_data path to root data directory containing split scenes'
    assert args.resolution in ['4K', '1080p', '720p', '540p'], '--resolution must be one of 4K, 1080p, 720p, 540p'
downsample_scenes(**vars(args))
| nvvl-master | examples/pytorch_superres/tools/transcode_scenes.py |
import sys
import copy
from glob import glob
import math
import os
import torch
from torch.utils.data import DataLoader
from dataloading.datasets import imageDataset
import nvvl
class NVVL():
def __init__(self, frames, is_cropped, crop_size, root,
batchsize=1, device_id=0,
shuffle=False, distributed=False, fp16=False):
self.root = root
self.batchsize = batchsize
self.shuffle = shuffle
self.distributed = distributed
self.frames = frames
self.device_id = device_id
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*.mp4'))
if len(self.files) < 1:
print(("[Error] No video files in %s" % (self.root)))
raise LookupError
if fp16:
tensor_type = 'half'
else:
tensor_type = 'float'
self.image_shape = nvvl.video_size_from_file(self.files[0])
height = max(self.image_shape.height, self.crop_size[0])
width = max(self.image_shape.width, self.crop_size[1])
# Frames are enforced to be mod64 in each dimension
# as required by FlowNetSD convolutions
height = int(math.floor(height/64.)*64)
width = int(math.floor(width/64.)*64)
processing = {"input" : nvvl.ProcessDesc(type=tensor_type,
height=height,
width=width,
random_crop=self.is_cropped,
random_flip=False,
normalized=False,
color_space="RGB",
dimension_order="cfhw",
index_map=[0, 1, 2])}
dataset = nvvl.VideoDataset(self.files,
sequence_length=self.frames,
device_id=self.device_id,
processing=processing)
self.loader = nvvl.VideoLoader(dataset,
batch_size=self.batchsize,
shuffle=self.shuffle,
distributed=self.distributed)
def __len__(self):
return len(self.loader)
def __iter__(self):
return iter(self.loader)
def get_loader(args):
if args.loader == 'pytorch':
dataset = imageDataset(
args.frames,
args.is_cropped,
args.crop_size,
os.path.join(args.root, 'train'),
args.batchsize,
args.world_size)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
train_loader = DataLoader(
dataset,
batch_size=args.batchsize,
shuffle=(sampler is None),
num_workers=10,
pin_memory=True,
sampler=sampler,
drop_last=True)
effective_bsz = args.batchsize * float(args.world_size)
train_batches = math.ceil(len(dataset) / float(effective_bsz))
dataset = imageDataset(
args.frames,
args.is_cropped,
args.crop_size,
os.path.join(args.root, 'val'),
args.batchsize,
args.world_size)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
val_loader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True,
sampler=sampler,
drop_last=True)
val_batches = math.ceil(len(dataset) / float(args.world_size))
elif args.loader == 'NVVL':
train_loader = NVVL(
args.frames,
args.is_cropped,
args.crop_size,
os.path.join(args.root, 'train'),
batchsize=args.batchsize,
shuffle=True,
distributed=True,
device_id=args.rank % 8,
fp16=args.fp16)
train_batches = len(train_loader)
val_loader = NVVL(
args.frames,
args.is_cropped,
args.crop_size,
os.path.join(args.root, 'val'),
batchsize=1,
shuffle=True,
distributed=True,
device_id=args.rank % 8,
fp16=args.fp16)
val_batches = len(val_loader)
sampler = None
else:
raise ValueError('%s is not a valid option for --loader' % args.loader)
return train_loader, train_batches, val_loader, val_batches, sampler
| nvvl-master | examples/pytorch_superres/dataloading/dataloaders.py |
import copy
import sys, time, argparse, os, subprocess, shutil
import math, numbers, random, bisect
from random import Random
from skimage import io, transform
from os import listdir
from os.path import join
from glob import glob
import numpy as np
import torch
import torch.utils.data as data
class imageDataset():
def __init__(self, frames, is_cropped, crop_size,
root, batch_size, world_size):
self.root = root
self.frames = frames
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*/*.png'))
if len(self.files) < 1:
print(("[Error] No image files in %s" % (self.root)))
raise LookupError
self.files = sorted(self.files)
self.total_frames = 0
# Find start_indices for different folders
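        # Each scene lives in its own folder; recording where every folder
        # starts lets __getitem__ (via bisect) avoid returning a sequence that
        # crosses a scene boundary, and total_frames is reduced by
        # (frames + 1) per folder to discount the unusable tail frames.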
self.start_index = [0]
prev_folder = self.files[0].split('/')[-2]
for (i, f) in enumerate(self.files):
folder = f.split('/')[-2]
if i > 0 and folder != prev_folder:
self.start_index.append(i)
prev_folder = folder
self.total_frames -= (self.frames + 1)
else:
self.total_frames += 1
self.total_frames -= (self.frames + 1)
self.start_index.append(i)
if self.is_cropped:
self.image_shape = self.crop_size
else:
self.image_shape = list(io.imread(self.files[0]).shape[:2])
# Frames are enforced to be mod64 in each dimension
# as required by FlowNetSD convolutions
self.frame_size = self.image_shape
self.frame_size[0] = int(math.floor(self.image_shape[0]/64.)*64)
self.frame_size[1] = int(math.floor(self.image_shape[1]/64.)*64)
self.frame_buffer = np.zeros((3, self.frames,
self.frame_size[0], self.frame_size[1]),
dtype = np.float32)
def __len__(self):
return self.total_frames
def __getitem__(self, index):
index = index % self.total_frames
# we want bisect_right here so that the first frame in a file gets the
# file, not the previous file
next_file_index = bisect.bisect_right(self.start_index, index)
if self.start_index[next_file_index] < index + self.frames:
index = self.start_index[next_file_index] - self.frames - 1
for (i, file_idx) in enumerate(range(index, index + self.frames)):
image = io.imread(self.files[file_idx])
#TODO(jbarker): Tidy this up and remove redundant computation
if i == 0 and self.is_cropped:
crop_x = random.randint(0, self.image_shape[1] - self.frame_size[1])
crop_y = random.randint(0, self.image_shape[0] - self.frame_size[0])
            elif not self.is_cropped:
crop_x = math.floor((self.image_shape[1] - self.frame_size[1]) / 2)
crop_y = math.floor((self.image_shape[0] - self.frame_size[0]) / 2)
self.crop_size = self.frame_size
image = image[crop_y:crop_y + self.crop_size[0],
crop_x:crop_x + self.crop_size[1],
:]
self.frame_buffer[:, i, :, :] = np.rollaxis(image, 2, 0)
return torch.from_numpy(self.frame_buffer)
| nvvl-master | examples/pytorch_superres/dataloading/datasets.py |
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from nvidia.loss_scaler import DynamicLossScaler, LossScaler
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
def fp32_to_fp16(val):
"""Convert fp32 `val` to fp16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, FLOAT_TYPES):
val = val.half()
return val
return conversion_helper(val, half_conversion)
def fp16_to_fp32(val):
"""Convert fp16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, HALF_TYPES):
val = val.float()
return val
return conversion_helper(val, float_conversion)
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module('module', module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))
class FP16_Optimizer(object):
"""
FP16_Optimizer is designed to wrap an existing PyTorch optimizer,
and enable an fp16 model to be trained using a master copy of fp32 weights.
Args:
optimizer (torch.optim.optimizer): Existing optimizer containing initialized fp16 parameters. Internally, FP16_Optimizer replaces the passed optimizer's fp16 parameters with new fp32 parameters copied from the original ones. FP16_Optimizer also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy after each step.
static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale fp16 gradients computed by the model. Scaled gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so static_loss_scale should not affect learning rate.
dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any static_loss_scale option.
"""
def __init__(self, optimizer, static_loss_scale=1.0, dynamic_loss_scale=False):
if not torch.cuda.is_available:
raise SystemError('Cannot use fp16 without CUDA')
self.fp16_param_groups = []
self.fp32_param_groups = []
self.fp32_flattened_groups = []
for i, param_group in enumerate(optimizer.param_groups):
print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
for param in param_group['params']:
if param.requires_grad:
if param.type() == 'torch.cuda.HalfTensor':
print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
.format(param.size()))
fp16_params_this_group.append(param)
elif param.type() == 'torch.cuda.FloatTensor':
print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
.format(param.size()))
fp32_params_this_group.append(param)
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
fp32_flattened_this_group = None
if len(fp16_params_this_group) > 0:
fp32_flattened_this_group = _flatten_dense_tensors(
[param.detach().data.clone().float() for param in fp16_params_this_group])
fp32_flattened_this_group = Variable(fp32_flattened_this_group, requires_grad = True)
fp32_flattened_this_group.grad = fp32_flattened_this_group.new(
*fp32_flattened_this_group.size())
# python's lovely list concatenation via +
if fp32_flattened_this_group is not None:
param_group['params'] = [fp32_flattened_this_group] + fp32_params_this_group
else:
param_group['params'] = fp32_params_this_group
self.fp16_param_groups.append(fp16_params_this_group)
self.fp32_param_groups.append(fp32_params_this_group)
self.fp32_flattened_groups.append(fp32_flattened_this_group)
# print("self.fp32_flattened_groups = ", self.fp32_flattened_groups)
# print("self.fp16_param_groups = ", self.fp16_param_groups)
self.optimizer = optimizer.__class__(optimizer.param_groups)
# self.optimizer.load_state_dict(optimizer.state_dict())
self.param_groups = self.optimizer.param_groups
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.loss_scaler = DynamicLossScaler()
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
def zero_grad(self):
"""
Zero fp32 and fp16 parameter grads.
"""
self.optimizer.zero_grad()
for fp16_group in self.fp16_param_groups:
for param in fp16_group:
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def _check_overflow(self):
params = []
for group in self.fp16_param_groups:
for param in group:
params.append(param)
for group in self.fp32_param_groups:
for param in group:
params.append(param)
self.overflow = self.loss_scaler.has_overflow(params)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
def _copy_grads_fp16_to_fp32(self):
for fp32_group, fp16_group in zip(self.fp32_flattened_groups, self.fp16_param_groups):
if len(fp16_group) > 0:
# This might incur one more deep copy than is necessary.
fp32_group.grad.data.copy_(
_flatten_dense_tensors([fp16_param.grad.data for fp16_param in fp16_group]))
def _downscale_fp32(self):
if self.loss_scale != 1.0:
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
param.grad.data.mul_(1./self.loss_scale)
def clip_fp32_grads(self, max_norm, norm_type=2):
"""
Clips fp32 master gradients via torch.nn.utils.clip_grad_norm.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if self.overflow is True).
"""
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return torch.nn.utils.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return -1
def _copy_params_fp32_to_fp16(self):
for fp16_group, fp32_group in zip(self.fp16_param_groups, self.fp32_flattened_groups):
if len(fp16_group) > 0:
for fp16_param, fp32_data in zip(fp16_group,
_unflatten_dense_tensors(fp32_group.data, fp16_group)):
fp16_param.data.copy_(fp32_data)
def state_dict(self):
"""
Returns a dict containing the current state of this FP16_Optimizer instance.
This dict contains attributes of FP16_Optimizer, as well as the state_dict
of the contained Pytorch optimizer.
Untested.
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict.
Untested.
"""
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, step should be called after fp16_optimizer_obj.backward(loss).
step updates the fp32 master copy of parameters using the optimizer supplied to
FP16_Optimizer's constructor, then copies the updated fp32 params into the fp16 params
originally referenced by Fp16_Optimizer's constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, step may be called without a prior call to self.backward(loss).
However, the user should take care that any loss.backward() call within the closure
has been replaced by fp16_optimizer_obj.backward(loss).
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to FP16_Optimizer's constructor. closure should call zero_grad on the FP16_Optimizer object, compute the loss, call .backward(loss), and return the loss.
Closure example::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. note::
The only changes that need to be made compared to
`ordinary optimizer closures`_ are that "optimizer" itself should be an instance of
FP16_Optimizer, and that the call to loss.backward should be replaced by
optimizer.backward(loss).
.. warning::
Currently, calling step with a closure is not compatible with dynamic loss scaling.
.. _`ordinary optimizer closures`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
if closure is not None and isinstance(self.loss_scaler, DynamicLossScaler):
raise TypeError("Using step with a closure is currently not "
"compatible with dynamic loss scaling.")
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
print("OVERFLOW! Skipping step. Attempted loss scale: {}".format(scale))
return
if closure is not None:
self._step_with_closure(closure)
else:
self.optimizer.step()
self._copy_params_fp32_to_fp16()
return
def _step_with_closure(self, closure):
def wrapped_closure():
if self.first_closure_call_this_step:
"""
We expect that the fp16 params are initially fresh on entering self.step(),
so _copy_params_fp32_to_fp16() is unnecessary the first time wrapped_closure()
is called within self.optimizer.step().
"""
self.first_closure_call_this_step = False
else:
"""
If self.optimizer.step() internally calls wrapped_closure more than once,
it may update the fp32 params after each call. However, self.optimizer
doesn't know about the fp16 params at all. If the fp32 params get updated,
we can't rely on self.optimizer to refresh the fp16 params. We need
to handle that manually:
"""
self._copy_params_fp32_to_fp16()
"""
Our API expects the user to give us ownership of the backward() call by
replacing all calls to loss.backward() with optimizer.backward(loss).
This requirement holds whether or not the call to backward() is made within
a closure.
If the user is properly calling optimizer.backward(loss) within "closure,"
calling closure() here will give the fp32 master params fresh gradients
for the optimizer to play with,
so all wrapped_closure needs to do is call closure() and return the loss.
"""
temp_loss = closure()
return temp_loss
self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
def backward(self, loss, update_fp32_grads=True):
"""
fp16_optimizer_obj.backward performs the following conceptual operations:
fp32_loss = loss.float() (see first Note below)
scaled_loss = fp32_loss*loss_scale
scaled_loss.backward(), which accumulates scaled gradients into the .grad attributes of the
fp16 model's leaves.
fp16 grads are then copied to the stored fp32 params' .grad attributes (see second Note).
Finally, fp32 grads are divided by loss_scale.
In this way, after fp16_optimizer_obj.backward, the fp32 parameters have fresh gradients,
and fp16_optimizer_obj.step may be called.
.. note::
Converting the loss to fp32 before applying the loss scale provides some
additional safety against overflow if the user has supplied an fp16 value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
fp16_optimizer_obj.backward.
.. note::
The gradients found in an fp16 model's leaves after a call to
fp16_optimizer_obj.backward should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to fp16_optimizer_obj.backward,
only the master gradients should be regarded as valid, and can be retrieved via
:attr:`inspect_fp32_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_fp32_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay this copy, which is useful to eliminate redundant fp16->fp32 grad copies if fp16_optimizer_obj.backward is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling fp16_optimizer_obj.update_fp32_grads before calling fp16_optimizer_obj.step.
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_fp32_grads=False)
optimizer.backward(loss2, update_fp32_grads=False)
optimizer.update_fp32_grads()
"""
self.loss_scaler.backward(loss.float())
if update_fp32_grads:
self.update_fp32_grads()
def update_fp32_grads(self):
"""
Copy the .grad attribute from stored references to fp16 parameters to
the .grad attribute of the master fp32 parameters that are directly
updated by the optimizer. :attr:`update_fp32_grads` only needs to be called if
fp16_optimizer_obj.backward was called with update_fp32_grads=False.
"""
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow: return
self._copy_grads_fp16_to_fp32()
self._downscale_fp32()
def inspect_fp32_grad_data(self):
"""
When running with FP16_Optimizer, .grad attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' .grad
attributes will contain valid gradients properly divided by the loss scale. However,
because :attr:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_fp32_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the .grad.data attributes of the fp32 master params belonging to that group.
"""
raise NotImplementedError("Currently not implemented, working on it...")
fp32_grads_each_group = []
if self.overflow:
print("Warning: calling FP16_Optimizer.inspect_fp32_grad_data while in an overflow state. "
"Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
return None
else:
return None
@property
def loss_scale(self):
return self.loss_scaler.loss_scale
| nvvl-master | examples/pytorch_superres/nvidia/fp16.py |
import torch
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Model wrapper that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def copy_in_params(net, params):
net_params = list(net.parameters())
for i in range(len(params)):
net_params[i].data.copy_(params[i].data)
def set_grad(params, params_with_grad):
for param, param_w_grad in zip(params, params_with_grad):
if param.grad is None:
param.grad = torch.nn.Parameter(param.data.new().resize_(*param.data.size()))
param.grad.data.copy_(param_w_grad.grad.data)
def BN_convert_float(module):
'''
Designed to work with network_to_half.
BatchNorm layers need parameters in single precision.
Find all layers and convert them back to float. This can't
be done with built in .apply as that function will apply
fn to all modules, parameters, and buffers. Thus we wouldn't
be able to guard the float conversion based on the module type.
'''
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
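# Example use of network_to_half (sketch, assuming `model` is an nn.Module):
#   model = network_to_half(model)
# fp32 inputs are cast to fp16 by the tofp16() front end, while BatchNorm
# layers keep fp32 parameters for numerical stability.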
def backwards_debug_hook(grad):
print("Uh oh, master_params is receiving a gradient in the backward pass!")
def create_master_params(model):
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model.parameters()]).float()
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return master_params
def model_grads_to_master_grads(model, master_params):
master_params.grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model.parameters() if p.requires_grad]))
def master_params_to_model_params(model, master_params):
params = [param.data for param in model.parameters()]
for param, master in zip(params, _unflatten_dense_tensors(master_params.data, params)):
param.copy_(master)
def params_to_type(params, totype):
new_params = []
for param in params:
new_params.append(param.type(totype))
return new_params
def params_to_fp16(params):
return params_to_type(params, torch.cuda.HalfTensor)
def params_to_fp32(params):
return params_to_type(params, torch.cuda.FloatTensor)
def clone_params(net):
new_params = []
for param in list(net.parameters()):
new_params.append(param.data.clone())
return new_params
def clone_grads(net):
new_params = []
for param in list(net.parameters()):
new_params.append(param.grad.data.clone())
return new_params
def copy_into_params(net, input_tens):
net_params = list(net.parameters())
for i in range(len(input_tens)):
net_params[i].data.copy_(input_tens[i])
def copy_in_grads(params, params_with_grad):
for param, param_w_grad in zip(params, params_with_grad):
if param.grad is None:
param.grad = torch.nn.Parameter(param.data.new().resize_(*param.data.size()))
param.grad.data.copy_(param_w_grad.grad.data)
# NB: only implements overflow-based loss scaling for now.
class DynamicLossScaler:
def __init__(self,
init_scale=2.**15,
scale_factor=2.,
scale_window=100):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, tensors):
try:
for tens in tensors:
if tens is None:
continue
if DynamicLossScaler._has_inf_or_nan(tens):
return True
except TypeError:
return DynamicLossScaler._has_inf_or_nan(tensors)
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
if torch.is_tensor(x):
max_val = x.abs().max()
else:
max_val = x
if max_val == float('inf'):
return True
nan_count = torch.sum(x != x)
return nan_count > 0
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
if overflow:
self.cur_scale /= self.scale_factor
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
| nvvl-master | examples/pytorch_superres/nvidia/fp16util.py |
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed as dist
from torch.nn.modules import Module
def flat_dist_call(tensors, call, extra_args=None):
flat_dist_call.warn_on_half = True
buckets = {}
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
if flat_dist_call.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case.")
flat_dist_call.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
coalesced = _flatten_dense_tensors(bucket)
if extra_args is not None:
call(coalesced, *extra_args)
else:
call(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(bucket, _unflatten_dense_tensors(coalesced, bucket)):
buf.copy_(synced)
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
param_list = [param for param in self.module.state_dict().values() if torch.is_tensor(param)]
if dist._backend == dist.dist_backend.NCCL:
for param in param_list:
assert param.is_cuda, "NCCL backend only supports model parameters to be on GPU."
#broadcast parameters
flat_dist_call(param_list, dist.broadcast, (0,) )
#all reduce gradient hook
def allreduce_params():
if(self.needs_reduction):
self.needs_reduction = False
else:
return
grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]
flat_dist_call(grads, dist.all_reduce)
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
param._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
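# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Typical wrapping of a CUDA model with the DistributedDataParallel class above.
# The backend and init_method values are assumptions; the process group must be
# initialized before the wrapper broadcasts parameters from rank 0.
def _example_wrap_model(model, init_method='env://'):
    dist.init_process_group(backend='nccl', init_method=init_method)
    return DistributedDataParallel(model.cuda())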
| nvvl-master | examples/pytorch_superres/nvidia/distributed.py |
import torch
class LossScaler:
def __init__(self, scale=1):
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
return False
# `overflow` is a boolean indicating whether the gradient overflowed
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
class DynamicLossScaler:
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
# return False
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
inf_count = torch.sum(x.abs() == float('inf'))
if inf_count > 0:
return True
nan_count = torch.sum(x != x)
return nan_count > 0
# `overflow` is a boolean indicating whether the gradient overflowed
def update_scale(self, overflow):
if overflow:
#self.cur_scale /= self.scale_factor
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
# self.cur_scale = 1
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
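# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# One optimizer step guarded by dynamic loss scaling. `model`, `optimizer`, and
# `loss` are assumed to exist; `scaler` is a DynamicLossScaler instance.
def _example_scaled_step(model, optimizer, loss, scaler):
    optimizer.zero_grad()
    scaler.backward(loss)                         # backprop on loss * scaler.loss_scale
    params = list(model.parameters())
    overflow = scaler.has_overflow(params)
    if not overflow:
        for p in params:                          # unscale gradients before the update
            if p.grad is not None:
                p.grad.data.mul_(1. / scaler.loss_scale)
        optimizer.step()
    scaler.update_scale(overflow)                 # shrink scale on overflow, grow otherwise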
| nvvl-master | examples/pytorch_superres/nvidia/loss_scaler.py |
from math import floor
# Cyclic learning rate
def cycle(iteration, stepsize):
return floor(1 + iteration / (2 * stepsize))
def abs_pos(cycle_num, iteration, stepsize):
return abs(iteration / stepsize - 2 * cycle_num + 1)
def rel_pos(iteration, stepsize):
return max(0, (1-abs_pos(cycle(iteration, stepsize), iteration, stepsize)))
def cyclic_learning_rate(min_lr, max_lr, stepsize):
return lambda iteration: min_lr + (max_lr - min_lr) * rel_pos(iteration, stepsize)
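# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Drive an optimizer's learning rate from the cyclic schedule above; the bounds
# and step size are arbitrary placeholders.
def _example_apply_clr(optimizer, iteration,
                       min_lr=1e-4, max_lr=1e-2, stepsize=2000):
    lr_fn = cyclic_learning_rate(min_lr, max_lr, stepsize)
    for group in optimizer.param_groups:
        group['lr'] = lr_fn(iteration)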
| nvvl-master | examples/pytorch_superres/model/clr.py |
import time
import scipy.misc
import numpy as np
from math import floor, log
import torch
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
from torch.nn.functional import upsample
import sys
sys.path.append('flownet2-pytorch/networks')
try:
from submodules import *
except ModuleNotFoundError:
raise ModuleNotFoundError("flownet2-pytorch not found, did you update the git submodule?")
def lp_error(img1, img2, lp=2):
return torch.mean((img1 - img2)**lp)
def psnr(img1, img2):
mse = lp_error(img1, img2, 2)
if mse == 0:
return 100
PIXEL_MAX = 255.0
return 20 * (torch.log(PIXEL_MAX / torch.sqrt(mse)) / log(10))
def rgb2ycbcr(input_tensor):
# Conversion from RGB to YCbCr according to
# https://en.wikipedia.org/wiki/YCbCr?section=6#JPEG_conversion
# Expecting batch of RGB images with values in [0, 255]
kr = 0.299
kg = 0.587
kb = 0.114
# Expecting batch of image sequence inputs with values in [0, 255]
r = input_tensor[:, 0, :, :, :]
g = input_tensor[:, 1, :, :, :]
b = input_tensor[:, 2, :, :, :]
y = torch.unsqueeze(kr * r + kg * g + kb * b, 1)
cb = torch.unsqueeze(128 - (0.168736 * r) - (0.331264 * g) + (0.5 * b), 1)
cr = torch.unsqueeze(128 + (0.5 * r) - (0.418688 * g) - (0.081312 * b), 1)
return y, cb, cr
def ycbcr2rgb(input_tensor):
# Conversion from YCbCr to RGB according to
# https://en.wikipedia.org/wiki/YCbCr?section=6#JPEG_conversion
# Expecting batch of YCbCr images with values in [0, 255]
y = input_tensor[:, 0, :, :]
cb = input_tensor[:, 1, :, :]
cr = input_tensor[:, 2, :, :]
r = y + 1.402 * (cr - 128)
g = y - 0.344136 * (cb - 128) - 0.714136 * (cr - 128)
b = y + 1.772 * (cb - 128)
r = torch.unsqueeze(r, 1)
g = torch.unsqueeze(g, 1)
b = torch.unsqueeze(b, 1)
return torch.clamp(torch.cat((r, g, b), 1), 0, 255)
def get_grid(batchsize, rows, cols, fp16):
# Input is a tensor with shape [batchsize, channels, rows, cols]
# Output is tensor with shape [batchsize, 2, rows, cols]
# where each col in [:, 1, :, :] and each row in [:, 0, :, :]
# is an evenly spaced arithmetic progression from -1.0 to 1.0
hor = torch.linspace(-1.0, 1.0, cols)
hor = hor.view(1, 1, 1, cols)
hor = hor.expand(batchsize, 1, rows, cols)
ver = torch.linspace(-1.0, 1.0, rows)
ver = ver.view(1, 1, rows, 1)
ver = ver.expand(batchsize, 1, rows, cols)
t_grid = torch.cat([hor, ver], 1)
if fp16:
return Variable(t_grid.half().cuda())
else:
return Variable(t_grid.cuda())
def tensorboard_image(name, image, iteration, writer):
out_im = np.moveaxis(image.data.cpu().numpy(), 0, 2)
writer.add_image(name, out_im, iteration)
class VSRNet(nn.Module):
def __init__(self, frames=3, flownet_path='', fp16=False):
super(VSRNet, self).__init__()
self.frames = frames
self.fp16 = fp16
self.mi = floor(self.frames / 2)
self.pooling = nn.AvgPool2d(4)
self.upsample = nn.Upsample(scale_factor=4, mode='bilinear')
if fp16:
#from FlowNetSD16 import FlowNetSD
from FlowNetSD import FlowNetSD
else:
from FlowNetSD import FlowNetSD
FlowNetSD_network = FlowNetSD(args=[], batchNorm=False)
try:
FlowNetSD_weights = torch.load(flownet_path)['state_dict']
except:
raise IOError('FlowNet weights could not be loaded from %s' % flownet_path)
FlowNetSD_network.load_state_dict(FlowNetSD_weights)
self.FlowNetSD_network = FlowNetSD_network
self.train_grid = None
self.val_grid = None
self.batchNorm = True
self.conv1 = conv(self.batchNorm, 1, 64, kernel_size=9)
self.conv2 = conv(self.batchNorm, 64 * self.frames, 32, kernel_size=5)
self.conv3 = nn.Conv2d(32, 1, kernel_size=5, stride=1, padding=2, bias=True)
self.conv3.weight = torch.nn.init.normal(self.conv3.weight, 0, 0.1)
def forward(self, inputs, iteration, writer, im_out=False):
batchsize, channels, frames, rows, cols = inputs.size()
y, cb, cr = rgb2ycbcr(inputs)
y /= 255
target = y[:, :, self.mi, :, :]
if writer is not None and im_out:
out_im = inputs[0, :, self.mi, :, :] / 255.0
tensorboard_image('target', out_im, iteration, writer)
out_im = self.pooling(out_im)
tensorboard_image('downsampled', out_im, iteration, writer)
out_im = self.upsample(out_im.unsqueeze(0)).squeeze(0)
tensorboard_image('upsampled', out_im, iteration, writer)
# Compute per RGB channel mean across pixels for each image in input batch
rgb_mean = inputs.view((batchsize, channels) + (-1, )).float().mean(dim=-1)
rgb_mean = rgb_mean.view((batchsize, channels) + (1, 1, 1, ))
if self.fp16:
rgb_mean = rgb_mean.half()
inputs = (inputs - rgb_mean) / 255
if self.training:
if self.train_grid is None:
self.train_grid = get_grid(batchsize, rows, cols, self.fp16)
grid = self.train_grid
else:
if self.val_grid is None:
self.val_grid = get_grid(batchsize, rows, cols, self.fp16)
grid = self.val_grid
grid.requires_grad = False
downsampled_input = self.pooling(cb[:, :, self.mi, :, :])
cb[:, :, self.mi, :, :] = self.upsample(downsampled_input)
downsampled_input = self.pooling(cr[:, :, self.mi, :, :])
cr[:, :, self.mi, :, :] = self.upsample(downsampled_input)
conv1_out = []
for fr in range(self.frames):
downsampled_input = self.pooling(y[:, :, fr, :, :])
y[:, :, fr, :, :] = self.upsample(downsampled_input)
if fr == self.mi:
conv1_out.append(self.conv1(y[:, :, self.mi, :, :]))
else:
im1 = inputs[:, :, fr, :, :]
im2 = inputs[:, :, self.mi, :, :]
im_pair = torch.cat((im2, im1), 1)
to_warp = y[:, :, fr, :, :]
flow = self.upsample(self.FlowNetSD_network(im_pair)[0]) / 16
flow = torch.cat([flow[:, 0:1, :, :] / ((cols - 1.0) / 2.0),
flow[:, 1:2, :, :] / ((rows - 1.0) / 2.0)], 1)
warped = torch.nn.functional.grid_sample(
input=to_warp,
grid=(grid + flow).permute(0, 2, 3, 1),
mode='bilinear',
padding_mode='border')
conv1_out.append(self.conv1(warped))
conv1_out = torch.cat(conv1_out, 1)
conv2_out = self.conv2(conv1_out)
# Cast to fp32 before the sigmoid and loss computation to prevent
# divergence when training in fp16
prediction = torch.nn.functional.sigmoid(self.conv3(conv2_out).float())
loss = torch.nn.functional.mse_loss(prediction.float(), target.float())
if not self.training:
# Following [1], remove 12 pixels around border to prevent
# convolution edge effects affecting PSNR
psnr_metric = psnr(prediction[:, :, 12:, :-12].float() * 255,
target[:, :, 12:, :-12].float() * 255)
prediction = ycbcr2rgb(torch.cat((prediction * 255, cb[:, :, self.mi, :, :],
cr[:, :, self.mi, :, :]), 1)) / 255
if writer is not None and im_out:
out_im = prediction[0, :, :, :]
tensorboard_image('prediction', out_im, iteration, writer)
if self.training:
return loss
else:
return loss, psnr_metric
# [1] Osama Makansi, Eddy Ilg, Thomas Brox, "End-to-End Learning of Video Super-Resolution with Motion Compensation", https://arxiv.org/abs/1707.00471
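# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A single validation forward pass. The checkpoint path and input layout are
# assumptions: `inputs` is [batch, 3, frames, rows, cols] RGB with values in [0, 255].
def _example_validate(inputs, flownet_path='FlowNetSD_checkpoint.pth.tar'):
    net = VSRNet(frames=3, flownet_path=flownet_path).cuda().eval()
    loss, psnr_val = net(inputs, iteration=0, writer=None)
    return loss, psnr_val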
| nvvl-master | examples/pytorch_superres/model/model.py |
import os
import torch
import subprocess
from string import Template
from torch.utils.ffi import create_extension
this_file = os.path.dirname(__file__)
nvvl_path = os.path.join(this_file, "../")
sources = ['nvvl/src/nvvl_th.c']
headers = ['nvvl/src/nvvl_generated.h',
'nvvl/src/nvvl_th.h']
defines = [('WITH_CUDA', None)]
include_dirs = [os.path.join(nvvl_path, "include")]
subprocess.call(["make"])
# if not torch.cuda.is_available():
# raise RuntimeError('CUDA must be available to use this package.')
ffi = create_extension(
'nvvl.lib',
package=True,
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=True,
extra_objects=[],
extra_compile_args=['-std=c99'],
include_dirs=include_dirs,
libraries=['nvvl']
)
if __name__ == '__main__':
ffi.build()
| nvvl-master | pytorch/build.py |
#!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext as build_ext_orig
from distutils.command.build import build as build_orig
from setuptools.command.install import install as install_orig
from setuptools.command.develop import develop as develop_orig
from distutils.errors import DistutilsFileError, DistutilsArgError
from distutils.dir_util import mkpath
from distutils.spawn import spawn
import build
this_file = os.path.dirname(__file__)
nvvl_path = os.path.join(this_file, "../")
mycmdclass = {}
for super_class in [build_ext_orig, build_orig, install_orig, develop_orig]:
class command(super_class):
user_options = super_class.user_options + [
('with-nvvl=', None, 'Location of built nvvl library'),
('system-nvvl', None, 'Use the system installed nvvl library'),
]
def initialize_options(self):
super().initialize_options()
self.with_nvvl = None
self.system_nvvl = None
mycmdclass[super_class.__name__] = command
def run_build(self):
if self.with_nvvl:
if self.system_nvvl:
raise DistutilsArgError("system-nvvl and with-nvvl are mutually exclusive")
libpath = os.path.join(self.with_nvvl, "libnvvl.so")
if not os.path.isfile(libpath):
raise DistutilsFileError("Provided with-nvvl path, but " + libpath + " doesn't exit.")
for ext in self.extensions:
ext.library_dirs += [self.with_nvvl]
self.distribution.data_files = [
('nvvl/lib', [libpath])]
elif not self.system_nvvl:
output_dir = os.path.join(self.build_temp, "nvvl-build")
mkpath(output_dir, 0o777, dry_run=self.dry_run)
cmake_cmd = ["cmake", "-B"+output_dir, "-H"+nvvl_path]
spawn(cmake_cmd, dry_run=self.dry_run)
make_cmd = ["make", "-C", output_dir, "-j4"]
spawn(make_cmd, dry_run=self.dry_run)
for ext in self.extensions:
ext.library_dirs += [output_dir]
ext.runtime_library_dirs = ["$ORIGIN"]
self.distribution.data_files = [
('nvvl/lib', [os.path.join(output_dir, "libnvvl.so")])]
build_ext_orig.run(self)
def finalize_options_build(self):
build_ext_orig.finalize_options(self)
for cmd in ['install', 'develop', 'build']:
self.set_undefined_options(cmd,
('system_nvvl', 'system_nvvl'),
('with_nvvl', 'with_nvvl')
)
mycmdclass["build_ext"].run = run_build
mycmdclass["build_ext"].finalize_options = finalize_options_build
setup(
name="nvvl",
version="1.0",
description="Read frame sequences from a video file",
license="BSD",
url="https://github.com/NVIDIA/nvvl",
author="Jared Casper",
author_email="[email protected]",
install_requires=["cffi>=1.0.0"],
setup_requires=["cmake", "cffi>=1.0.0"],
packages=find_packages(exclude=["build"]),
ext_package="",
cffi_modules=[
os.path.join(this_file, "build.py:ffi")
],
cmdclass=mycmdclass
)
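# Hedged usage note (added for illustration; not part of the original file).
# The custom options defined above are typically invoked as:
#   python setup.py install                         # build the bundled libnvvl via cmake
#   python setup.py install --system-nvvl           # link against a system-installed libnvvl
#   python setup.py install --with-nvvl=/path/to/nvvl/build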
| nvvl-master | pytorch/setup.py |
from .dataset import VideoDataset,ProcessDesc
from .loader import VideoLoader
def video_size_from_file(filename):
return lib.nvvl_video_size_from_file(str.encode(filename))
| nvvl-master | pytorch/nvvl/__init__.py |
import bisect
import cffi
import collections
import random
import sys
import torch
import torch.utils.data
from . import lib
class ProcessDesc(object):
"""Describes processing done on a decoded frame.
Parameters
----------
type : string, optional
Type of the output, can be one of "float", "half", or "byte"
(Default: "float")
width, height : int, optional
width and height to crop frame to, set to 0 for scaled frame
size (Default: 0)
scale_width, scale_height : int, optional
width and height to scale image to before cropping, set to 0
for no scaling (Default: 0)
normalized : bool, optional
Normalize all values to [0, 1] instead of [0, 255] (Default: False)
random_crop : bool, optional
If True, the origin of the crop is randomly chosen. If False,
the crop originates at (0, 0). (Default: False)
random_flip : bool, optional
If True, flip the image horizontally before cropping. (Default: False)
color_space : enum, optional
Color space to return images in, one of "RGB" or "YCbCr". (Default: RGB)
index_map : list of ints, optional
Map from indices into the decoded sequence to indices in this Layer.
None indicates a 1-to-1 mapping of the frames from sequence to
layer.
For example, to reverse the frames of a 5 frame sequence, set
index_map to [4, 3, 2, 1, 0].
An index of -1 indicates that the decoded frame should not
be used in this layer. For example, to extract just the
middle frame from a 5 frame sequence, set index_map to
[-1, -1, 0, -1, -1].
The returned tensors will be sized to fit the maximum index in
this array (if it is provided, they will fit the full sequence
if it is None).
(Default: None)
dimension_order : string, optional
Order of dimensions in the returned tensors. Must contain
exactly one each of 'f', 'c', 'h', and 'w': 'f' for frames in the
sequence, 'c' for channel, 'h' for height, and 'w' for width.
(Default: "fchw")
"""
def __init__(self, type="float",
width=0, height=0, scale_width=0, scale_height=0,
normalized=False, random_crop=False, random_flip=False,
color_space="RGB", index_map=None, dimension_order="fchw"):
self.ffi = lib._ffi
self._desc = self.ffi.new("struct NVVL_LayerDesc*")
self.width = width
self.height = height
self.scale_width = scale_width
self.scale_height = scale_height
self.normalized = normalized
self.random_crop = random_crop
self.random_flip = random_flip
if index_map:
self.index_map = self.ffi.new("int[]", index_map)
self.count = max(index_map) + 1
self.index_map_length = len(index_map)
else:
self.index_map = self.ffi.NULL
self.count = 0
self.index_map_length = 0
if color_space.lower() == "rgb":
self.color_space = lib.ColorSpace_RGB
self.channels = 3
elif color_space.lower() == "ycbcr":
self.color_space = lib.ColorSpace_YCbCr
self.channels = 3
else:
raise ValueError("Unknown color space")
if type == "float":
self.tensor_type = torch.cuda.FloatTensor
elif type == "half":
self.tensor_type = torch.cuda.HalfTensor
elif type == "byte":
self.tensor_type = torch.cuda.ByteTensor
else:
raise ValueError("Unknown type")
self.dimension_order = dimension_order
def _get_dim(self, dim):
if dim == 'c':
return self.channels
elif dim == 'f':
return self.count
elif dim == 'h':
return self.height
elif dim == 'w':
return self.width
raise ValueError("Invalid dimension")
def get_dims(self):
dims = []
for d in self.dimension_order:
dims.append(self._get_dim(d))
return dims
def __getattribute__(self, name):
try:
d = super().__getattribute__("_desc")
return d.__getattribute__(name)
except AttributeError:
return super().__getattribute__(name)
raise AttributeError()
def __setattr__(self, name, value):
try:
self._desc.__setattr__(name, value)
except:
super().__setattr__(name, value)
def desc(self):
return self._desc
log_levels = {
"debug" : lib.LogLevel_Debug,
"info" : lib.LogLevel_Info,
"warn" : lib.LogLevel_Warn,
"error" : lib.LogLevel_Error,
"none" : lib.LogLevel_None
}
class VideoDataset(torch.utils.data.Dataset):
"""VideoDataset
Parameters
----------
filenames : collection of strings
list of video files to draw from
sequence_length : int
how many frames are in each sample
device_id : int, optional
GPU device to use (Default: 0)
get_label : callable, optional
callable with signature:
(filename : str, frame_num : int, rand_changes : dict) : anything
where rand_changes maps each processing layer name to the
random crop/flip choices made for that sample.
The returned value is simply passed as an output
alongside any returned frames.
If None, the returned label is None. (Default: None)
processing : dict {string -> ProcessDesc}, optional
Describes processing to be done on the sequence to generate
each data item. If None, each frame in the sequence will be
returned as is. (Default: None)
log_level : string, optional
One of "debug", "info", "warn", "error", or "none".
(Default: "warn")
"""
def __init__(self, filenames, sequence_length, device_id=0,
get_label=None, processing=None, log_level="warn"):
self.ffi = lib._ffi
self.filenames = filenames
self.sequence_length = sequence_length
self.device_id = device_id
self.get_label = get_label if get_label is not None else lambda x,y,z: None
self.processing = processing
if self.processing is None:
self.processing = {"default" : ProcessDesc()}
elif "labels" in processing and get_label is not None:
raise KeyError("Processing must not have a 'labels' key when get_label is not None.")
try:
log_level = log_levels[log_level]
except KeyError:
print("Invalid log level", log_level, "using warn.", file=sys.stderr)
log_level = lib.LogLevel_Warn
if not filenames:
raise ValueError("Empty filenames list given to VideoDataset")
if sequence_length < 1:
raise ValueError("Sequence length must be at least 1")
self.loader = lib.nvvl_create_video_loader_with_log(self.device_id, log_level)
self.total_frames = 0
self.frame_counts = []
self.start_index = []
for f in filenames:
count = lib.nvvl_frame_count(self.loader, str.encode(f))
if count < self.sequence_length:
print("NVVL WARNING: Ignoring", f, "because it only has", count,
"frames and the sequence length is", self.sequence_length)
continue
count = count - self.sequence_length + 1
self.frame_counts.append(count)
self.total_frames += count
self.start_index.append(self.total_frames) # purposefully off by one for bisect to work
size = lib.nvvl_video_size(self.loader)
self.width = size.width
self.height = size.height
for name, desc in self.processing.items():
if desc.width == 0:
if desc.scale_width == 0:
desc.width = self.width
else:
desc.width = desc.scale_width
if desc.height == 0:
if desc.scale_height == 0:
desc.height = self.height
else:
desc.height = desc.scale_height
if desc.count == 0:
desc.count = self.sequence_length
self.samples_left = 0
self.seq_queue = collections.deque()
self.seq_info_queue = collections.deque()
self.get_count = 0
self.get_count_warning_threshold = 1000
self.disable_get_warning = False
def get_stats(self):
return lib.nvvl_get_stats(self.loader)
def reset_stats(self):
return lib.nvvl_reset_stats(self.loader)
def set_log_level(self, level):
"""Sets the log level from now forward
Parameters
----------
level : string
The log level, one of "debug", "info", "warn", "error", or "none"
"""
lib.nvvl_set_log_level(self.loader, log_levels[level])
def _read_sample(self, index):
# we want bisect_right here so the first frame in a file gets the file, not the previous file
file_index = bisect.bisect_right(self.start_index, index)
frame = index - self.start_index[file_index - 1] if file_index > 0 else index
filename = self.filenames[file_index]
lib.nvvl_read_sequence(self.loader, str.encode(filename),
frame, self.sequence_length)
self.seq_info_queue.append((filename, frame))
self.samples_left += 1
def _get_layer_desc(self, desc):
d = desc.desc()
changes = {}
if (desc.random_crop and (self.width > desc.width)):
d.crop_x = random.randint(0, self.width - desc.width)
else:
d.crop_x = 0
changes['crop_x'] = d.crop_x
if (desc.random_crop and (self.height > desc.height)):
d.crop_y = random.randint(0, self.height - desc.height)
else:
d.crop_y = 0
changes['crop_y'] = d.crop_y
if (desc.random_flip):
d.horiz_flip = random.random() < 0.5
else:
d.horiz_flip = False
changes['horiz_flip'] = d.horiz_flip
return d[0], changes
def _start_receive(self, tensor_map, index=0):
seq = lib.nvvl_create_sequence_device(self.sequence_length, self.device_id)
rand_changes = {}
for name, desc in self.processing.items():
tensor = tensor_map[name]
layer = self.ffi.new("struct NVVL_PicLayer*")
if desc.tensor_type == torch.cuda.FloatTensor:
layer.type = lib.PDT_FLOAT
elif desc.tensor_type == torch.cuda.HalfTensor:
layer.type = lib.PDT_HALF
elif desc.tensor_type == torch.cuda.ByteTensor:
layer.type = lib.PDT_BYTE
strides = tensor[index].stride()
try:
desc.stride.x = strides[desc.dimension_order.index('w')]
desc.stride.y = strides[desc.dimension_order.index('h')]
desc.stride.n = strides[desc.dimension_order.index('f')]
desc.stride.c = strides[desc.dimension_order.index('c')]
except ValueError:
raise ValueError("Invalid dimension order")
layer.desc, rand_changes[name] = self._get_layer_desc(desc)
if desc.index_map_length > 0:
layer.index_map = desc.index_map
layer.index_map_length = desc.index_map_length
else:
layer.index_map = self.ffi.NULL
layer.data = self.ffi.cast("void*", tensor[index].data_ptr())
lib.nvvl_set_layer(seq, layer, str.encode(name))
filename, frame = self.seq_info_queue.popleft()
self.seq_queue.append(seq)
lib.nvvl_receive_frames(self.loader, seq)
return seq, self.get_label(filename, frame, rand_changes)
def _finish_receive(self, synchronous=False):
if not self.seq_queue:
raise RuntimeError("Unmatched receive")
if self.samples_left <= 0:
raise RuntimeError("No more samples left in decoder pipeline")
seq = self.seq_queue.popleft()
if synchronous:
lib.nvvl_sequence_wait(seq)
else:
lib.nvvl_sequence_stream_wait_th(seq)
lib.nvvl_free_sequence(seq)
self.samples_left -= 1
def _create_tensor_map(self, batch_size=1):
tensor_map = {}
with torch.cuda.device(self.device_id):
for name, desc in self.processing.items():
tensor_map[name] = desc.tensor_type(batch_size, *desc.get_dims())
return tensor_map
def __getitem__(self, index):
if (self.samples_left > 0):
raise RuntimeError("Can't synchronously get an item when asyncronous frames are pending")
self.get_count += 1
if (self.get_count > self.get_count_warning_threshold
and not self.disable_get_warning):
print("WARNING: Frequent use of VideoDataset's synchronous get operation\n"
"detected. This operation is slow and should only be used for \n"
"debugging and other limited cases. To turn this warning off, set\n"
"the disable_get_warning attribute of the VideoDataset to True.\n")
self.disable_get_warning = True
self._read_sample(index)
tensor_map = self._create_tensor_map()
seq, label = self._start_receive(tensor_map)
self._finish_receive(True)
tensor_map_cpu = {name: tensor[0].cpu() for name, tensor in tensor_map.items()}
if label is not None:
tensor_map_cpu['labels'] = label
if len(tensor_map_cpu) == 1 and "default" in tensor_map_cpu:
return tensor_map_cpu["default"][0].cpu()
return tensor_map_cpu
def __len__(self):
return self.total_frames
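# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Build a dataset that decodes 5-frame sequences and returns 224x224 random crops
# as normalized float tensors; the file list and sizes are placeholders.
def _example_dataset(filenames):
    processing = {"input": ProcessDesc(type="float", width=224, height=224,
                                       random_crop=True, random_flip=True,
                                       normalized=True, dimension_order="cfhw")}
    return VideoDataset(filenames, sequence_length=5, processing=processing)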
| nvvl-master | pytorch/nvvl/dataset.py |
import collections
import torch
from .dataset import VideoDataset
class VideoLoader(object):
"""Loads batches of sequences of frames from a video file. Meant to be
nearly a drop-in replacement for a torch.utils.data.DataLoader.
Parameters
----------
dataset : VideoDataset
dataset from which to load the frames, must be a
nvvl.VideoDataset.
batch_size : int, optional
how many samples (i.e. sequences) per batch to load (Default: 1)
shuffle : bool, optional
shuffle the order of samples (Default: False)
distributed : bool, optional
use a distributed sampler, requires shuffle (Default: False)
sampler : torch.utils.data.Sampler, optional
defines the strategy to draw samples from the
dataset. Mutually exclusive with shuffle and distributed.
batch_sampler : torch.utils.data.Sampler, optional
like sampler, but returns a batch of indices at a
time. Mutually exclusive with batch_size, shuffle,
distributed, sampler, and drop_last.
drop_last : bool, optional
drop the last incomplete batch. Setting this to False is
not currently implemented. (Default: True)
buffer_length : int, optional
number of batches to preload (Default: 3)
"""
def __init__(self, dataset, batch_size=1, shuffle=False,
distributed=False, sampler=None,
batch_sampler=None, drop_last=True, buffer_length=3):
self.dataset = dataset
self.batch_size = batch_size
self.drop_last = drop_last
if batch_sampler is not None:
if (batch_size > 1 or shuffle or distributed
or sampler is not None or drop_last):
raise ValueError('batch_sampler is mutually exclusive with '
'batch_size, shuffle, distributed, sampler, '
'and drop_last')
if sampler is not None:
if shuffle or distributed:
raise ValueError("sampler is mutually exclusive with shuffle and distributed")
if batch_sampler is None:
if sampler is None:
if distributed:
if not shuffle:
raise ValueError("pytorch distributed is always shuffled")
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
elif shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, batch_size, drop_last)
self.sampler = sampler
self.batch_sampler = batch_sampler
self.tensor_queue = collections.deque()
self.batch_size_queue = collections.deque()
self.buffer_length = buffer_length
def _receive_batch(self):
batch_size = self.batch_size_queue.popleft()
t = self.dataset._create_tensor_map(batch_size)
labels = []
for i in range(batch_size):
_, label = self.dataset._start_receive(t, i)
labels.append(label)
self.tensor_queue.append((batch_size, t, labels))
def get_stats(self):
return self.dataset.get_stats()
def reset_stats(self):
return self.dataset.reset_stats()
def __len__(self):
return len(self.batch_sampler)
def __next__(self):
if not self.tensor_queue:
assert self.dataset.samples_left == 0, "Tensor queue is empty but there are samples left in the VideoDataset"
raise StopIteration
# first fire off a receive to keep the pipe filled
if self.batch_size_queue:
self._receive_batch()
batch_size, t, labels = self.tensor_queue.popleft()
for i in range(batch_size):
self.dataset._finish_receive()
if any(label is not None for label in labels):
t["labels"] = labels
return t
def __iter__(self):
if self.dataset.samples_left != 0:
raise RuntimeError("Need to exhaust iterator before creating a new one")
for b in iter(self.batch_sampler):
for i in b:
self.dataset._read_sample(i)
self.batch_size_queue.append(len(b))
for i in range(self.buffer_length):
self._receive_batch()
return self
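# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Iterate batches of GPU tensors; the key "input" assumes the dataset was built
# with a processing layer of that name (it is "default" if none was given).
def _example_iterate(dataset):
    loader = VideoLoader(dataset, batch_size=4, shuffle=True)
    for batch in loader:
        frames = batch["input"]
        # ... run a training step on `frames` here ...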
| nvvl-master | pytorch/nvvl/loader.py |
from __future__ import print_function
import argparse
from glob import glob
import os
import sys
import time
import torch
import nvvl
import psutil
from dataloading.dataloaders import get_loader
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, required=True,
help='folder of mp4/png files')
parser.add_argument('--sleep', type=float, required=True,
help='dummy computation time')
parser.add_argument('--loader', type=str, required=True,
help='dataloader: pytorch/NVVL/lintel')
parser.add_argument('--batchsize', type=int, default=8,
help='batch size loaded')
parser.add_argument('--frames', type=int, default=3,
help='number of frames in each loaded sequence')
parser.add_argument('--is_cropped', action='store_true',
help='crop input frames?')
parser.add_argument('--crop_size', type=int, nargs='+', default=[-1, -1],
help='[height, width] for input crop')
parser.add_argument('--fp16', action='store_true',
help='load data in fp16?')
def main(args):
assert args.sleep >= 0.0, 'Computation time must be >= 0.0s'
print(str(args) + '\n')
loader, batches = get_loader(args)
counter = 0
data_time_sum = 0
iter_time_sum = 0
cpu_sum = 0
mem_sum = 0
for epoch in range(2):
start = time.time()
for i, x in enumerate(loader, 1):
if args.loader != 'NVVL':
x = x.cuda()
if args.fp16:
x = x.half()
if epoch > 0:
counter += 1
end = time.time()
data_t = end-start
if args.sleep > 0.0:
time.sleep(args.sleep)
end = time.time()
iter_t = end-start
data_time_sum += data_t
iter_time_sum += iter_t
c = psutil.cpu_percent()
cpu_sum += c
m = psutil.virtual_memory().percent
mem_sum += m
start = time.time()
data_time_ave = data_time_sum / counter
iter_time_ave = iter_time_sum / counter
cpu_ave = cpu_sum / counter
mem_ave = mem_sum / counter
print("Data loading time avg, iteration time avg, cpu load avg, memory usage avg")
print("%.5f %.5f %.2f %.2f" % (data_time_ave, iter_time_ave, cpu_ave, mem_ave))
if __name__=='__main__':
main(parser.parse_args())
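# Hedged invocation example (added for illustration; paths and values are placeholders):
#   python benchmark.py --root /data/videos --sleep 0.02 --loader NVVL \
#       --batchsize 8 --frames 3 --is_cropped --crop_size 540 960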
| nvvl-master | pytorch/test/benchmark.py |
import sys
import copy
from glob import glob
import math
import os
import torch
from torch.utils.data import DataLoader
from dataloading.datasets import imageDataset, lintelDataset
import nvvl
class NVVL():
def __init__(self, frames, is_cropped, crop_size, root,
batchsize=1, device_id=0,
shuffle=False, fp16=False):
self.root = root
self.batchsize = batchsize
self.shuffle = shuffle
self.frames = frames
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*.mp4'))
if len(self.files) < 1:
print(("[Error] No video files in %s" % (self.root)))
raise LookupError
if fp16:
tensor_type = 'half'
else:
tensor_type = 'float'
self.image_shape = nvvl.video_size_from_file(self.files[0])
height = max(self.image_shape.height, self.crop_size[0])
width = max(self.image_shape.width, self.crop_size[1])
print("Video size: ", height, "x", width, "\n")
processing = {"input" : nvvl.ProcessDesc(type=tensor_type,
height=height,
width=width,
random_crop=self.is_cropped,
random_flip=False,
normalized=False,
color_space="RGB",
dimension_order="cfhw",
index_map=[0, 1, 2])}
dataset = nvvl.VideoDataset(self.files,
sequence_length=self.frames,
processing=processing)
self.loader = nvvl.VideoLoader(dataset,
batch_size=self.batchsize,
shuffle=self.shuffle)
def __len__(self):
return len(self.loader)
def __iter__(self):
return iter(self.loader)
def get_loader(args):
if args.loader == 'pytorch':
dataset = imageDataset(
args.frames,
args.is_cropped,
args.crop_size,
args.root,
args.batchsize)
sampler = torch.utils.data.sampler.RandomSampler(dataset)
train_loader = DataLoader(
dataset,
batch_size=args.batchsize,
shuffle=(sampler is None),
num_workers=10,
pin_memory=True,
sampler=sampler,
drop_last=True)
train_batches = len(dataset)
elif args.loader == 'lintel':
dataset = lintelDataset(
args.frames,
args.is_cropped,
args.crop_size,
args.root,
args.batchsize)
sampler = torch.utils.data.sampler.RandomSampler(dataset)
train_loader = DataLoader(
dataset,
batch_size=args.batchsize,
shuffle=(sampler is None),
num_workers=10,
pin_memory=True,
sampler=sampler,
drop_last=True)
train_batches = len(dataset)
elif args.loader == 'NVVL':
train_loader = NVVL(
args.frames,
args.is_cropped,
args.crop_size,
args.root,
batchsize=args.batchsize,
shuffle=True,
fp16=args.fp16)
train_batches = len(train_loader)
else:
raise ValueError('%s is not a valid option for --loader' % args.loader)
return train_loader, train_batches
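# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Construct the NVVL wrapper above directly, without going through argparse;
# all values are placeholders.
def _example_nvvl_loader(root):
    return NVVL(frames=3, is_cropped=True, crop_size=[540, 960],
                root=root, batchsize=8, shuffle=True, fp16=False)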
| nvvl-master | pytorch/test/dataloading/dataloaders.py |
import copy
import sys, time, argparse, os, subprocess, shutil
import math, numbers, random, bisect
import subprocess
from random import Random
from skimage import io, transform
from os import listdir
from os.path import join
from glob import glob
import numpy as np
import torch
import torch.utils.data as data
import nvvl
import lintel
class dataset(object):
def __init__(self, width, height, frames):
self.width = width
self.height = height
self.num_frames = frames
class lintelDataset():
def __init__(self, frames, is_cropped, crop_size,
root, batch_size, frame_size = [-1, -1]):
self.root = root
self.frames = frames
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*.mp4'))
assert len(self.files) >= 1, "[Error] No video files in %s" % self.root
image_shape = nvvl.video_size_from_file(self.files[0])
self.image_shape = [image_shape.height, image_shape.width]
self.frame_size = frame_size
print("Video size: ", self.image_shape[0], "x", self.image_shape[1], "\n")
if self.is_cropped:
self.frame_size = self.crop_size
else:
self.frame_size = self.image_shape
self.dataset = dataset(width=self.image_shape[1],
height=self.image_shape[0],
frames=self.frames)
self.gt_step = (self.frames - 1) * 2 + 1
self.total_frames = 0
self.frame_counts = []
self.start_index = []
self.videos = []
for i, filename in enumerate(self.files):
with open(filename, 'rb') as f:
video = f.read()
self.videos.append(video)
cmd = ["ffprobe", "-v", "error", "-count_frames", "-select_streams",
"v:0", "-show_entries", "stream=nb_frames", "-of",
"default=nokey=1:noprint_wrappers=1", filename]
count = int(subprocess.check_output(cmd))
count -= self.gt_step
if count < 0:
print("[Warning] Video does not have enough frames\n\t%s" % f)
count = 0
self.total_frames += count
self.frame_counts.append(count)
self.start_index.append(self.total_frames)
assert self.total_frames >= 1, "[Error] Not enough frames at \n\t%s" % self.root
self.frame_buffer = np.zeros((3, self.frames,
self.frame_size[0], self.frame_size[1]),
dtype = np.float32)
def __len__(self):
return self.total_frames
def __getitem__(self, index):
index = index % self.total_frames
# we want bisect_right here so that the first frame in a file gets the
# file, not the previous file
file_index = bisect.bisect_right(self.start_index, index)
frame = index - self.start_index[file_index - 1] if file_index > 0 else index
filename = self.files[file_index]
video = self.videos[file_index]
frames, seek_distance = lintel.loadvid(
video,
should_random_seek=True,
width=self.dataset.width,
height=self.dataset.height,
num_frames=self.dataset.num_frames,
fps_cap=60)
frames = np.frombuffer(frames, dtype=np.uint8)
frames = np.reshape(
frames, newshape=(self.dataset.num_frames, self.dataset.height,
self.dataset.width, 3))
for i in range(self.frames):
#TODO(jbarker): Tidy this up and remove redundant computation
if i == 0 and self.is_cropped:
crop_x = random.randint(0, self.image_shape[1] - self.frame_size[1])
crop_y = random.randint(0, self.image_shape[0] - self.frame_size[0])
elif not self.is_cropped:
crop_x = math.floor((self.image_shape[1] - self.frame_size[1]) / 2)
crop_y = math.floor((self.image_shape[0] - self.frame_size[0]) / 2)
self.crop_size = self.frame_size
image = frames[i, crop_y:crop_y + self.crop_size[0],
crop_x:crop_x + self.crop_size[1],
:]
self.frame_buffer[:, i, :, :] = np.rollaxis(image, 2, 0)
return torch.from_numpy(self.frame_buffer)
class imageDataset():
def __init__(self, frames, is_cropped, crop_size,
root, batch_size):
self.root = root
self.frames = frames
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*/*.png'))
if len(self.files) < 1:
self.files = glob(os.path.join(self.root, '*/*.jpg'))
if len(self.files) < 1:
print(("[Error] No image files in %s" % (self.root)))
raise LookupError
self.files = sorted(self.files)
self.total_frames = 0
# Find start_indices for different folders
self.start_index = [0]
prev_folder = self.files[0].split('/')[-2]
for (i, f) in enumerate(self.files):
folder = f.split('/')[-2]
if i > 0 and folder != prev_folder:
self.start_index.append(i)
prev_folder = folder
self.total_frames -= (self.frames + 1)
else:
self.total_frames += 1
self.total_frames -= (self.frames + 1)
self.start_index.append(i)
self.image_shape = list(io.imread(self.files[0]).shape[:2])
print("Image size: ", self.image_shape[0], "x", self.image_shape[1], "\n")
if self.is_cropped:
self.image_shape = self.crop_size
self.frame_size = self.image_shape
self.frame_buffer = np.zeros((3, self.frames,
self.frame_size[0], self.frame_size[1]),
dtype = np.float32)
def __len__(self):
return self.total_frames
def __getitem__(self, index):
index = index % self.total_frames
# we want bisect_right here so that the first frame in a file gets the
# file, not the previous file
next_file_index = bisect.bisect_right(self.start_index, index)
if self.start_index[next_file_index] < index + self.frames:
index = self.start_index[next_file_index] - self.frames - 1
for (i, file_idx) in enumerate(range(index, index + self.frames)):
image = io.imread(self.files[file_idx])
#TODO(jbarker): Tidy this up and remove redundant computation
if i == 0 and self.is_cropped:
crop_x = random.randint(0, self.image_shape[1] - self.frame_size[1])
crop_y = random.randint(0, self.image_shape[0] - self.frame_size[0])
elif not self.is_cropped:
crop_x = math.floor((self.image_shape[1] - self.frame_size[1]) / 2)
crop_y = math.floor((self.image_shape[0] - self.frame_size[0]) / 2)
self.crop_size = self.frame_size
image = image[crop_y:crop_y + self.crop_size[0],
crop_x:crop_x + self.crop_size[1],
:]
self.frame_buffer[:, i, :, :] = np.rollaxis(image, 2, 0)
return torch.from_numpy(self.frame_buffer)
| nvvl-master | pytorch/test/dataloading/datasets.py |
#!/usr/bin/env python
import os
import subprocess
import sys
from setuptools import setup, find_packages
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDAExtension, BuildExtension as build_ext_orig
from distutils.command.build import build as build_orig
from setuptools.command.install import install as install_orig
from setuptools.command.develop import develop as develop_orig
from distutils.errors import DistutilsFileError, DistutilsArgError
from distutils.dir_util import mkpath
from distutils.spawn import spawn
this_file = os.path.dirname(__file__)
nvvl_path = os.path.join(this_file, "../")
mycmdclass = {}
for super_class in [build_ext_orig, build_orig, install_orig, develop_orig]:
class command(super_class):
user_options = super_class.user_options + [
('with-nvvl=', None, 'Location of built nvvl library'),
('system-nvvl', None, 'Use the system installed nvvl library'),
]
def initialize_options(self):
super().initialize_options()
self.with_nvvl = None
self.system_nvvl = None
if (super_class.__name__ == "BuildExtension"):
name = "build_ext"
else:
name = super_class.__name__
print("Overriding", name)
mycmdclass[name] = command
def run_build(self):
if self.with_nvvl:
if self.system_nvvl:
raise DistutilsArgError("system-nvvl and with-nvvl are mutually exclusive")
libpath = os.path.join(self.with_nvvl, "libnvvl.so")
if not os.path.isfile(libpath):
raise DistutilsFileError("Provided with-nvvl path, but " + libpath + " doesn't exit.")
for ext in self.extensions:
ext.library_dirs += [self.with_nvvl]
self.distribution.data_files = [
('nvvl/', [libpath])]
elif not self.system_nvvl:
output_dir = os.path.join(self.build_temp, "nvvl-build")
mkpath(output_dir, 0o777, dry_run=self.dry_run)
cmake_cmd = ["cmake", "-B"+output_dir, "-H"+nvvl_path]
spawn(cmake_cmd, dry_run=self.dry_run)
make_cmd = ["make", "-C", output_dir, "-j4"]
spawn(make_cmd, dry_run=self.dry_run)
for ext in self.extensions:
ext.library_dirs += [output_dir]
ext.runtime_library_dirs = ["$ORIGIN"]
self.distribution.data_files = [
('nvvl/', [os.path.join(output_dir, "libnvvl.so")])]
build_ext_orig.run(self)
def finalize_options_build(self):
build_ext_orig.finalize_options(self)
for cmd in ['install', 'develop', 'build']:
self.set_undefined_options(cmd,
('system_nvvl', 'system_nvvl'),
('with_nvvl', 'with_nvvl')
)
mycmdclass["build_ext"].run = run_build
mycmdclass["build_ext"].finalize_options = finalize_options_build
this_file = os.path.dirname(__file__)
nvvl_path = os.path.join(this_file, "../")
sources = ['src/nvvl_th.cpp']
defines = [('WITH_CUDA', None)]
include_dirs = [os.path.join(nvvl_path, "include"),
os.path.join(nvvl_path, "src")]
nvvl_ext = CUDAExtension(
'nvvl._nvvl',
sources=sources,
define_macros=defines,
extra_objects=[],
languages=["c++"],
extra_compile_args=['-std=c++14'],
include_dirs=include_dirs,
libraries=['nvvl']
)
setup(
name="nvvl",
version="1.1",
description="Read frame sequences from a video file",
license="BSD",
url="https://github.com/NVIDIA/nvvl",
author="Jared Casper",
author_email="[email protected]",
setup_requires=["cmake"],
ext_modules=[nvvl_ext],
packages=["nvvl"],
cmdclass=mycmdclass
)
| nvvl-master | pytorch1.0/setup.py |
from .dataset import VideoDataset,ProcessDesc
from .loader import VideoLoader
from ._nvvl import video_size_from_file
| nvvl-master | pytorch1.0/nvvl/__init__.py |
import bisect
import collections
import random
import sys
import torch
import torch.utils.data
from . import _nvvl
class ProcessDesc(object):
"""Describes processing done on a decoded frame.
Parameters
----------
type : string, optional
Type of the output, can be one of "float", "half", or "byte"
(Default: "float")
width, height : int, optional
width and height to crop frame to, set to 0 for scaled frame
size (Default: 0)
scale_width, scale_height : int, optional
width and height to scale image to before cropping, set to 0
for no scaling (Default: 0)
normalized : bool, optional
Normalize all values to [0, 1] instead of [0, 255] (Default: False)
random_crop : bool, optional
If True, the origin of the crop is randomly chosen. If False,
the crop originates at (0, 0). (Default: False)
random_flip : bool, optional
If True, flip the image horizontally before cropping. (Default: False)
color_space : enum, optional
Color space to return images in, one of "RGB" or "YCbCr". (Default: RGB)
index_map : list of ints, optional
Map from indices into the decoded sequence to indices in this Layer.
None indicates a 1-to-1 mapping of the frames from sequence to
layer.
For example, to reverse the frames of a 5 frame sequence, set
index_map to [4, 3, 2, 1, 0].
An index of -1 indicates that the decoded frame should not
be used in this layer. For example, to extract just the
middle frame from a 5 frame sequence, set index_map to
[-1, -1, 0, -1, -1].
The returned tensors will be sized to fit the maximum index in
this array (if it is provided, they will fit the full sequence
if it is None).
(Default: None)
dimension_order : string, optional
Order of dimensions in the returned tensors. Must contain
exactly one each of 'f', 'c', 'h', and 'w': 'f' for frames in the
sequence, 'c' for channel, 'h' for height, and 'w' for width.
(Default: "fchw")
"""
def __init__(self, type="float",
width=0, height=0, scale_width=0, scale_height=0,
normalized=False, random_crop=False, random_flip=False,
color_space="RGB", index_map=None, dimension_order="fchw"):
self._desc = _nvvl.LayerDesc()
self.width = width
self.height = height
self.scale_width = scale_width
self.scale_height = scale_height
self.normalized = normalized
self.random_crop = random_crop
self.random_flip = random_flip
if index_map:
self.index_map = index_map
self.count = max(index_map) + 1
else:
self.index_map = []
self.count = 0
if color_space.lower() == "rgb":
self.color_space = _nvvl.ColorSpace_RGB
self.channels = 3
elif color_space.lower() == "ycbcr":
self.color_space = _nvvl.ColorSpace_YCbCr
self.channels = 3
else:
raise ValueError("Unknown color space")
if type == "float":
self.tensor_type = torch.cuda.FloatTensor
elif type == "half":
self.tensor_type = torch.cuda.HalfTensor
elif type == "byte":
self.tensor_type = torch.cuda.ByteTensor
else:
raise ValueError("Unknown type")
self.dimension_order = dimension_order
def _get_dim(self, dim):
if dim == 'c':
return self.channels
elif dim == 'f':
return self.count
elif dim == 'h':
return self.height
elif dim == 'w':
return self.width
raise ValueError("Invalid dimension")
def get_dims(self):
dims = []
for d in self.dimension_order:
dims.append(self._get_dim(d))
return dims
def __getattribute__(self, name):
try:
d = super().__getattribute__("_desc")
return d.__getattribute__(name)
except AttributeError:
return super().__getattribute__(name)
except TypeError:
return super().__getattribute__(name)
raise AttributeError()
def __setattr__(self, name, value):
try:
self._desc.__setattr__(name, value)
except:
super().__setattr__(name, value)
def desc(self):
return self._desc
log_levels = {
"debug" : _nvvl.LogLevel_Debug,
"info" : _nvvl.LogLevel_Info,
"warn" : _nvvl.LogLevel_Warn,
"error" : _nvvl.LogLevel_Error,
"none" : _nvvl.LogLevel_None
}
class VideoDataset(torch.utils.data.Dataset):
"""VideoDataset
Parameters
----------
filenames : collection of strings
list of video files to draw from
sequence_length : int
how many frames are in each sample
device_id : int, optional
GPU device to use (Default: 0)
get_label : callable, optional
callable with signature:
(filename : str, frame_num : int, rand_changes : dict) : anything
where rand_changes maps each processing layer name to the
random crop/flip choices made for that sample.
The returned value is simply passed as an output
alongside any returned frames.
If None, the returned label is None. (Default: None)
processing : dict {string -> ProcessDesc}, optional
Describes processing to be done on the sequence to generate
each data item. If None, each frame in the sequence will be
returned as is. (Default: None)
log_level : string, optional
One of "debug", "info", "warn", "error", or "none".
(Default: "warn")
"""
def __init__(self, filenames, sequence_length, device_id=0,
get_label=None, processing=None, log_level="warn"):
self.filenames = filenames
self.sequence_length = sequence_length
self.device_id = device_id
self.get_label = get_label if get_label is not None else lambda x,y,z: None
self.processing = processing
if self.processing is None:
self.processing = {"default" : ProcessDesc()}
elif "labels" in processing and get_label is not None:
raise KeyError("Processing must not have a 'labels' key when get_label is not None.")
try:
log_level = log_levels[log_level]
except KeyError:
print("Invalid log level", log_level, "using warn.", file=sys.stderr)
log_level = _nvvl.LogLevel_Warn
if not filenames:
raise ValueError("Empty filenames list given to VideoDataset")
if sequence_length < 1:
raise ValueError("Sequence length must be at least 1")
self.loader = _nvvl.VideoLoader(self.device_id, log_level)
self.total_frames = 0
self.frame_counts = []
self.start_index = []
for f in filenames:
count = self.loader.frame_count(f)
if count < self.sequence_length:
print("NVVL WARNING: Ignoring", f, "because it only has", count,
"frames and the sequence length is", self.sequence_length)
continue
count = count - self.sequence_length + 1
self.frame_counts.append(count)
self.total_frames += count
self.start_index.append(self.total_frames) # purposefully off by one for bisect to work
size = self.loader.size()
self.width = size.width
self.height = size.height
for name, desc in self.processing.items():
if desc.width == 0:
if desc.scale_width == 0:
desc.width = self.width
else:
desc.width = desc.scale_width
if desc.height == 0:
if desc.scale_height == 0:
desc.height = self.height
else:
desc.height = desc.scale_height
if desc.count == 0:
desc.count = self.sequence_length
self.samples_left = 0
self.seq_queue = collections.deque()
self.seq_info_queue = collections.deque()
self.get_count = 0
self.get_count_warning_threshold = 1000
self.disable_get_warning = False
def get_stats(self):
return self.loader.get_stats()
def reset_stats(self):
return self.loader.reset_stats()
def set_log_level(self, level):
"""Sets the log level from now forward
Parameters
----------
level : string
The log level, one of "debug", "info", "warn", "error", or "none"
"""
self.loader.set_log_level(log_levels[level])
def _read_sample(self, index):
# we want bisect_right here so the first frame in a file gets the file, not the previous file
file_index = bisect.bisect_right(self.start_index, index)
frame = index - self.start_index[file_index - 1] if file_index > 0 else index
filename = self.filenames[file_index]
self.loader.read_sequence(filename, frame, self.sequence_length)
self.seq_info_queue.append((filename, frame))
self.samples_left += 1
def _get_layer_desc(self, desc):
d = desc.desc()
changes = {}
if (desc.random_crop and (self.width > desc.width)):
d.crop_x = random.randint(0, self.width - desc.width)
else:
d.crop_x = 0
changes['crop_x'] = d.crop_x
if (desc.random_crop and (self.height > desc.height)):
d.crop_y = random.randint(0, self.height - desc.height)
else:
d.crop_y = 0
changes['crop_y'] = d.crop_y
if (desc.random_flip):
d.horiz_flip = random.random() < 0.5
else:
d.horiz_flip = False
changes['horiz_flip'] = d.horiz_flip
return d, changes
def _start_receive(self, tensor_map, index=0):
seq = _nvvl.PictureSequence(self.sequence_length, self.device_id)
rand_changes = {}
for name, desc in self.processing.items():
tensor = tensor_map[name]
if desc.tensor_type == torch.cuda.FloatTensor:
layer = _nvvl.PictureSequence.FloatLayer()
elif desc.tensor_type == torch.cuda.HalfTensor:
layer = _nvvl.PictureSequence.HalfLayer()
elif desc.tensor_type == torch.cuda.ByteTensor:
layer = _nvvl.PictureSequence.ByteLayer()
strides = tensor[index].stride()
try:
desc.stride.x = strides[desc.dimension_order.index('w')]
desc.stride.y = strides[desc.dimension_order.index('h')]
desc.stride.n = strides[desc.dimension_order.index('f')]
desc.stride.c = strides[desc.dimension_order.index('c')]
except ValueError:
raise ValueError("Invalid dimension order")
layer.desc, rand_changes[name] = self._get_layer_desc(desc)
layer.index_map = desc.index_map
layer.set_data(tensor[index].data_ptr())
seq.set_layer(name, layer)
filename, frame = self.seq_info_queue.popleft()
self.seq_queue.append(seq)
self.loader.receive_frames(seq)
return seq, self.get_label(filename, frame, rand_changes)
def _finish_receive(self, synchronous=False):
if not self.seq_queue:
raise RuntimeError("Unmatched receive")
if self.samples_left <= 0:
raise RuntimeError("No more samples left in decoder pipeline")
seq = self.seq_queue.popleft()
if synchronous:
seq.wait()
else:
seq.wait_stream()
self.samples_left -= 1
def _create_tensor_map(self, batch_size=1):
tensor_map = {}
with torch.cuda.device(self.device_id):
for name, desc in self.processing.items():
tensor_map[name] = desc.tensor_type(batch_size, *desc.get_dims())
return tensor_map
def __getitem__(self, index):
if (self.samples_left > 0):
raise RuntimeError("Can't synchronously get an item when asyncronous frames are pending")
self.get_count += 1
if (self.get_count > self.get_count_warning_threshold
and not self.disable_get_warning):
print("WARNING: Frequent use of VideoDataset's synchronous get operation\n"
"detected. This operation is slow and should only be used for \n"
"debugging and other limited cases. To turn this warning off, set\n"
"the disable_get_warning attribute of the VideoDataset to True.\n")
self.disable_get_warning = True
self._read_sample(index)
tensor_map = self._create_tensor_map()
seq, label = self._start_receive(tensor_map)
self._finish_receive(True)
tensor_map_cpu = {name: tensor[0].cpu() for name, tensor in tensor_map.items()}
if label is not None:
tensor_map_cpu['labels'] = label
if len(tensor_map_cpu) == 1 and "default" in tensor_map_cpu:
return tensor_map_cpu["default"][0].cpu()
return tensor_map_cpu
def __len__(self):
return self.total_frames
| nvvl-master | pytorch1.0/nvvl/dataset.py |
import collections
import torch
from .dataset import VideoDataset
class VideoLoader(object):
"""Loads batches of sequences of frames from a video file. Meant to be
nearly a drop-in replacement for a torch.utils.data.DataLoader.
Parameters
----------
dataset : VideoDataset
dataset from which to load the frames, must be a
nvvl.VideoDataset.
batch_size : int, optional
how many samples (i.e. sequences) per batch to load (Default: 1)
shuffle : bool, optional
shuffle the order of samples (Default: False)
distributed : bool, optional
use a distributed sampler, requires shuffle (Default: False)
sampler : torch.utils.data.Sampler, optional
defines the strategy to draw samples from the
dataset. Mutually exclusive with shuffle and distributed.
batch_sampler : torch.utils.data.Sampler, optional
like sampler, but returns a batch of indices at a
time. Mutually exclusive with batch_size, shuffle,
distributed, sampler, and drop_last.
drop_last : bool, optional
drop the last incomplete batch. Setting this to False is
not currently implemented. (Default: True)
buffer_length : int, optional
number of batches to preload (Default: 3)
"""
def __init__(self, dataset, batch_size=1, shuffle=False,
distributed=False, sampler=None,
batch_sampler=None, drop_last=True, buffer_length=3):
self.dataset = dataset
self.batch_size = batch_size
self.drop_last = drop_last
if batch_sampler is not None:
if (batch_size > 1 or shuffle or distributed
or sampler is not None or drop_last):
raise ValueError('batch_sampler is mutually exclusive with '
'batch_size, shuffle, distributed, sampler, '
'and drop_last')
if sampler is not None:
if shuffle or distributed:
raise ValueError("sampler is mutually exclusive with shuffle and distributed")
if batch_sampler is None:
if sampler is None:
if distributed:
if not shuffle:
raise ValueError("pytorch distributed is always shuffled")
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
elif shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, batch_size, drop_last)
self.sampler = sampler
self.batch_sampler = batch_sampler
self.tensor_queue = collections.deque()
self.batch_size_queue = collections.deque()
self.buffer_length = buffer_length
def _receive_batch(self):
batch_size = self.batch_size_queue.popleft()
t = self.dataset._create_tensor_map(batch_size)
labels = []
for i in range(batch_size):
_, label = self.dataset._start_receive(t, i)
labels.append(label)
self.tensor_queue.append((batch_size, t, labels))
def get_stats(self):
return self.dataset.get_stats()
def reset_stats(self):
return self.dataset.reset_stats()
def __len__(self):
return len(self.batch_sampler)
def __next__(self):
if not self.tensor_queue:
assert self.dataset.samples_left == 0, "Tensor queue is empty but there are samples left in the VideoDataset"
raise StopIteration
# first fire off a receive to keep the pipe filled
if self.batch_size_queue:
self._receive_batch()
batch_size, t, labels = self.tensor_queue.popleft()
for i in range(batch_size):
self.dataset._finish_receive()
if any(label is not None for label in labels):
t["labels"] = labels
return t
def __iter__(self):
if self.dataset.samples_left != 0:
raise RuntimeError("Need to exhaust iterator before creating a new one")
for b in iter(self.batch_sampler):
for i in b:
self.dataset._read_sample(i)
self.batch_size_queue.append(len(b))
for i in range(self.buffer_length):
self._receive_batch()
return self
| nvvl-master | pytorch1.0/nvvl/loader.py |
from __future__ import print_function
import argparse
from glob import glob
import os
import sys
import time
import torch
import nvvl
import psutil
from dataloading.dataloaders import get_loader
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, required=True,
help='folder of mp4/png files')
parser.add_argument('--sleep', type=float, required=True,
help='dummy computation time')
parser.add_argument('--loader', type=str, required=True,
help='dataloader: pytorch/NVVL/lintel')
parser.add_argument('--batchsize', type=int, default=8,
help='batch size loaded')
parser.add_argument('--frames', type=int, default=3,
help='number of frames in each loaded sequence')
parser.add_argument('--is_cropped', action='store_true',
help='crop input frames?')
parser.add_argument('--crop_size', type=int, nargs='+', default=[-1, -1],
help='[height, width] for input crop')
parser.add_argument('--fp16', action='store_true',
help='load data in fp16?')
def main(args):
    assert args.sleep >= 0.0, 'Computation time must be >= 0.0s'
print(str(args) + '\n')
loader, batches = get_loader(args)
counter = 0
data_time_sum = 0
iter_time_sum = 0
cpu_sum = 0
mem_sum = 0
for epoch in range(2):
start = time.time()
for i, x in enumerate(loader, 1):
if args.loader != 'NVVL':
x = x.cuda()
if args.fp16:
x = x.half()
if epoch > 0:
counter += 1
end = time.time()
data_t = end-start
if args.sleep > 0.0:
time.sleep(args.sleep)
end = time.time()
iter_t = end-start
data_time_sum += data_t
iter_time_sum += iter_t
c = psutil.cpu_percent()
cpu_sum += c
m = psutil.virtual_memory().percent
mem_sum += m
start = time.time()
data_time_ave = data_time_sum / counter
iter_time_ave = iter_time_sum / counter
cpu_ave = cpu_sum / counter
mem_ave = mem_sum / counter
print("Data loading time avg, iteration time avg, cpu load avg, memory usage avg")
print("%.5f %.5f %.2f %.2f" % (data_time_ave, iter_time_ave, cpu_ave, mem_ave))
if __name__=='__main__':
main(parser.parse_args())
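# A usage sketch (assumption: the root directory and timing values are placeholders)
# showing how the benchmark can be driven programmatically via the module-level parser.
def _example_run(root="/data/videos", loader="NVVL"):
    """Hypothetical programmatic invocation equivalent to a command-line run."""
    main(parser.parse_args([
        "--root", root,
        "--sleep", "0.05",
        "--loader", loader,
        "--batchsize", "8",
        "--frames", "3",
    ]))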
| nvvl-master | pytorch1.0/test/benchmark.py |
import sys
import copy
from glob import glob
import math
import os
import torch
from torch.utils.data import DataLoader
from dataloading.datasets import imageDataset, lintelDataset
import nvvl
class NVVL():
def __init__(self, frames, is_cropped, crop_size, root,
batchsize=1, device_id=0,
shuffle=False, fp16=False):
self.root = root
self.batchsize = batchsize
self.shuffle = shuffle
self.frames = frames
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*.mp4'))
if len(self.files) < 1:
print(("[Error] No video files in %s" % (self.root)))
raise LookupError
if fp16:
tensor_type = 'half'
else:
tensor_type = 'float'
self.image_shape = nvvl.video_size_from_file(self.files[0])
height = max(self.image_shape.height, self.crop_size[0])
width = max(self.image_shape.width, self.crop_size[1])
print("Video size: ", height, "x", width, "\n")
processing = {"input" : nvvl.ProcessDesc(type=tensor_type,
height=height,
width=width,
random_crop=self.is_cropped,
random_flip=False,
normalized=False,
color_space="RGB",
dimension_order="cfhw",
index_map=[0, 1, 2])}
dataset = nvvl.VideoDataset(self.files,
sequence_length=self.frames,
processing=processing)
self.loader = nvvl.VideoLoader(dataset,
batch_size=self.batchsize,
shuffle=self.shuffle)
def __len__(self):
return len(self.loader)
def __iter__(self):
return iter(self.loader)
def get_loader(args):
if args.loader == 'pytorch':
dataset = imageDataset(
args.frames,
args.is_cropped,
args.crop_size,
args.root,
args.batchsize)
sampler = torch.utils.data.sampler.RandomSampler(dataset)
train_loader = DataLoader(
dataset,
batch_size=args.batchsize,
shuffle=(sampler is None),
num_workers=10,
pin_memory=True,
sampler=sampler,
drop_last=True)
train_batches = len(dataset)
elif args.loader == 'lintel':
dataset = lintelDataset(
args.frames,
args.is_cropped,
args.crop_size,
args.root,
args.batchsize)
sampler = torch.utils.data.sampler.RandomSampler(dataset)
train_loader = DataLoader(
dataset,
batch_size=args.batchsize,
shuffle=(sampler is None),
num_workers=10,
pin_memory=True,
sampler=sampler,
drop_last=True)
train_batches = len(dataset)
elif args.loader == 'NVVL':
train_loader = NVVL(
args.frames,
args.is_cropped,
args.crop_size,
args.root,
batchsize=args.batchsize,
shuffle=True,
fp16=args.fp16)
train_batches = len(train_loader)
else:
raise ValueError('%s is not a valid option for --loader' % args.loader)
return train_loader, train_batches
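# A usage sketch (assumption: the root path is a placeholder). get_loader() only reads
# the attributes set below, so a plain Namespace is enough to drive it outside benchmark.py.
def _example_get_loader(root="/data/videos"):
    """Hypothetical illustration of building an NVVL loader through get_loader()."""
    from argparse import Namespace
    args = Namespace(
        loader="NVVL",          # one of "pytorch", "lintel", "NVVL"
        root=root,              # folder of .mp4 files (or per-folder frames for "pytorch")
        frames=3,
        is_cropped=False,
        crop_size=[-1, -1],
        batchsize=8,
        fp16=False,
    )
    loader, num_batches = get_loader(args)
    return loader, num_batches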
| nvvl-master | pytorch1.0/test/dataloading/dataloaders.py |
import copy
import sys, time, argparse, os, subprocess, shutil
import math, numbers, random, bisect
import subprocess
from random import Random
from skimage import io, transform
from os import listdir
from os.path import join
from glob import glob
import numpy as np
import torch
import torch.utils.data as data
import nvvl
import lintel
class dataset(object):
def __init__(self, width, height, frames):
self.width = width
self.height = height
self.num_frames = frames
class lintelDataset():
def __init__(self, frames, is_cropped, crop_size,
root, batch_size, frame_size = [-1, -1]):
self.root = root
self.frames = frames
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*.mp4'))
        assert len(self.files) >= 1, "[Error] No video files in %s" % self.root
image_shape = nvvl.video_size_from_file(self.files[0])
self.image_shape = [image_shape.height, image_shape.width]
self.frame_size = frame_size
print("Video size: ", self.image_shape[0], "x", self.image_shape[1], "\n")
if self.is_cropped:
self.frame_size = self.crop_size
else:
self.frame_size = self.image_shape
self.dataset = dataset(width=self.image_shape[1],
height=self.image_shape[0],
frames=self.frames)
self.gt_step = (self.frames - 1) * 2 + 1
self.total_frames = 0
self.frame_counts = []
self.start_index = []
self.videos = []
for i, filename in enumerate(self.files):
with open(filename, 'rb') as f:
video = f.read()
self.videos.append(video)
cmd = ["ffprobe", "-v", "error", "-count_frames", "-select_streams",
"v:0", "-show_entries", "stream=nb_frames", "-of",
"default=nokey=1:noprint_wrappers=1", filename]
count = int(subprocess.check_output(cmd))
count -= self.gt_step
if count < 0:
print("[Warning] Video does not have enough frames\n\t%s" % f)
count = 0
self.total_frames += count
self.frame_counts.append(count)
self.start_index.append(self.total_frames)
assert self.total_frames >= 1, "[Error] Not enough frames at \n\t%s" % self.root
self.frame_buffer = np.zeros((3, self.frames,
self.frame_size[0], self.frame_size[1]),
dtype = np.float32)
def __len__(self):
return self.total_frames
def __getitem__(self, index):
index = index % self.total_frames
        # we want bisect_right here so that the first frame in a file gets the
# file, not the previous file
file_index = bisect.bisect_right(self.start_index, index)
frame = index - self.start_index[file_index - 1] if file_index > 0 else index
filename = self.files[file_index]
video = self.videos[file_index]
frames, seek_distance = lintel.loadvid(
video,
should_random_seek=True,
width=self.dataset.width,
height=self.dataset.height,
num_frames=self.dataset.num_frames,
fps_cap=60)
frames = np.frombuffer(frames, dtype=np.uint8)
frames = np.reshape(
frames, newshape=(self.dataset.num_frames, self.dataset.height,
self.dataset.width, 3))
for i in range(self.frames):
#TODO(jbarker): Tidy this up and remove redundant computation
if i == 0 and self.is_cropped:
crop_x = random.randint(0, self.image_shape[1] - self.frame_size[1])
crop_y = random.randint(0, self.image_shape[0] - self.frame_size[0])
elif self.is_cropped == False:
crop_x = math.floor((self.image_shape[1] - self.frame_size[1]) / 2)
crop_y = math.floor((self.image_shape[0] - self.frame_size[0]) / 2)
self.crop_size = self.frame_size
image = frames[i, crop_y:crop_y + self.crop_size[0],
crop_x:crop_x + self.crop_size[1],
:]
self.frame_buffer[:, i, :, :] = np.rollaxis(image, 2, 0)
return torch.from_numpy(self.frame_buffer)
class imageDataset():
def __init__(self, frames, is_cropped, crop_size,
root, batch_size):
self.root = root
self.frames = frames
self.is_cropped = is_cropped
self.crop_size = crop_size
self.files = glob(os.path.join(self.root, '*/*.png'))
if len(self.files) < 1:
self.files = glob(os.path.join(self.root, '*/*.jpg'))
if len(self.files) < 1:
print(("[Error] No image files in %s" % (self.root)))
raise LookupError
self.files = sorted(self.files)
self.total_frames = 0
# Find start_indices for different folders
self.start_index = [0]
prev_folder = self.files[0].split('/')[-2]
for (i, f) in enumerate(self.files):
folder = f.split('/')[-2]
if i > 0 and folder != prev_folder:
self.start_index.append(i)
prev_folder = folder
self.total_frames -= (self.frames + 1)
else:
self.total_frames += 1
self.total_frames -= (self.frames + 1)
self.start_index.append(i)
self.image_shape = list(io.imread(self.files[0]).shape[:2])
print("Image size: ", self.image_shape[0], "x", self.image_shape[1], "\n")
if self.is_cropped:
self.image_shape = self.crop_size
self.frame_size = self.image_shape
self.frame_buffer = np.zeros((3, self.frames,
self.frame_size[0], self.frame_size[1]),
dtype = np.float32)
def __len__(self):
return self.total_frames
def __getitem__(self, index):
index = index % self.total_frames
# we want bisect_right here so that the first frame in a file gets the
# file, not the previous file
next_file_index = bisect.bisect_right(self.start_index, index)
if self.start_index[next_file_index] < index + self.frames:
index = self.start_index[next_file_index] - self.frames - 1
for (i, file_idx) in enumerate(range(index, index + self.frames)):
image = io.imread(self.files[file_idx])
#TODO(jbarker): Tidy this up and remove redundant computation
if i == 0 and self.is_cropped:
crop_x = random.randint(0, self.image_shape[1] - self.frame_size[1])
crop_y = random.randint(0, self.image_shape[0] - self.frame_size[0])
elif self.is_cropped == False:
crop_x = math.floor((self.image_shape[1] - self.frame_size[1]) / 2)
crop_y = math.floor((self.image_shape[0] - self.frame_size[0]) / 2)
self.crop_size = self.frame_size
image = image[crop_y:crop_y + self.crop_size[0],
crop_x:crop_x + self.crop_size[1],
:]
self.frame_buffer[:, i, :, :] = np.rollaxis(image, 2, 0)
return torch.from_numpy(self.frame_buffer)
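# A usage sketch (assumption: the directory layout and sizes are placeholders). The
# dataset yields CFHW float tensors, so it composes directly with a standard DataLoader.
def _example_image_loader(root="/data/frames", frames=3, batch_size=4):
    """Hypothetical illustration of wrapping imageDataset in a torch DataLoader."""
    dataset = imageDataset(frames, False, [-1, -1], root, batch_size)
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
                           num_workers=4, pin_memory=True, drop_last=True)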
| nvvl-master | pytorch1.0/test/dataloading/datasets.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""Setup script to build the TAO Toolkit package."""
import os
import setuptools
from release.python.utils import utils
PACKAGE_LIST = [
"nvidia_tao_tf1",
"third_party",
]
version_locals = utils.get_version_details()
setuptools_packages = []
for package_name in PACKAGE_LIST:
setuptools_packages.extend(utils.find_packages(package_name))
setuptools.setup(
name=version_locals['__package_name__'],
version=version_locals['__version__'],
description=version_locals['__description__'],
author='NVIDIA Corporation',
classifiers=[
'Environment :: Console',
# Pick your license as you wish (should match "license" above)
'License :: Other/Proprietary License',
'Natural Language :: English',
'Operating System :: Linux',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
license=version_locals['__license__'],
keywords=version_locals['__keywords__'],
packages=setuptools_packages,
package_data={
'': ['*.py', "*.pyc", "*.yaml", "*.so", "*.pdf"]
},
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'bpnet=nvidia_tao_tf1.cv.bpnet.entrypoint.bpnet:main',
'classification_tf1=nvidia_tao_tf1.cv.makenet.entrypoint.makenet:main',
'efficientdet_tf1=nvidia_tao_tf1.cv.efficientdet.entrypoint.efficientdet:main',
'fpenet=nvidia_tao_tf1.cv.fpenet.entrypoint.fpenet:main',
'mask_rcnn=nvidia_tao_tf1.cv.mask_rcnn.entrypoint.mask_rcnn:main',
'multitask_classification=nvidia_tao_tf1.cv.multitask_classification.entrypoint.multitask_classification:main',
'unet=nvidia_tao_tf1.cv.unet.entrypoint.unet:main',
'lprnet=nvidia_tao_tf1.cv.lprnet.entrypoint.lprnet:main',
'detectnet_v2=nvidia_tao_tf1.cv.detectnet_v2.entrypoint.detectnet_v2:main',
'ssd=nvidia_tao_tf1.cv.ssd.entrypoint.ssd:main',
'dssd=nvidia_tao_tf1.cv.ssd.entrypoint.ssd:main',
'retinanet=nvidia_tao_tf1.cv.retinanet.entrypoint.retinanet:main',
'faster_rcnn=nvidia_tao_tf1.cv.faster_rcnn.entrypoint.faster_rcnn:main',
'yolo_v3=nvidia_tao_tf1.cv.yolo_v3.entrypoint.yolo_v3:main',
'yolo_v4=nvidia_tao_tf1.cv.yolo_v4.entrypoint.yolo_v4:main',
'yolo_v4_tiny=nvidia_tao_tf1.cv.yolo_v4.entrypoint.yolo_v4:main',
]
}
)
| tao_tensorflow1_backend-main | setup.py |
# Copyright NVIDIA Corporation
"""Instantiate the TAO-TF1 docker container for developers."""
import argparse
from distutils.version import LooseVersion
import json
import os
import subprocess
import sys
ROOT_DIR = os.getenv("NV_TAO_TF_TOP", os.path.dirname(os.path.dirname(os.getcwd())))
print(f"Current root directory {ROOT_DIR}")
with open(os.path.join(ROOT_DIR, "docker/manifest.json"), "r") as m_file:
docker_config = json.load(m_file)
DOCKER_REGISTRY = docker_config["registry"]
DOCKER_REPOSITORY = docker_config["repository"]
DOCKER_TAG = docker_config["tag"]
DOCKER_COMMAND = "docker"
HOME_PATH = os.path.expanduser("~")
MOUNTS_PATH = os.path.join(HOME_PATH, ".tao_mounts.json")
def get_docker_mounts_from_file(mounts_file=MOUNTS_PATH):
"""Check for docker mounts in ~/.tao_mounts.json."""
if not os.path.exists(mounts_file):
return []
with open(mounts_file, 'r') as mfile:
data = json.load(mfile)
assert "Mounts" in list(data.keys()), "Invalid json file. Requires Mounts key."
return data["Mounts"]
def resolve_path(path_string):
"""Resolve UNIX style paths."""
return os.path.abspath(os.path.expanduser(path_string))
def format_mounts(mount_points):
"""Format mount points to docker recognizable commands."""
formatted_mounts = []
# Traverse through mount points and add format them for the docker command.
for mount_point in mount_points:
assert "source" in list(mount_point.keys()), "destination" in list(mount_point.keys())
mount = "{}:{}".format(
resolve_path(mount_point["source"]), mount_point["destination"]
)
formatted_mounts.append(mount)
return formatted_mounts
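# A minimal sketch (assumption, not part of the original file) of the JSON layout that
# get_docker_mounts_from_file() and format_mounts() expect in ~/.tao_mounts.json; the
# source/destination paths below are placeholders.
_EXAMPLE_TAO_MOUNTS = {
    "Mounts": [
        {"source": "~/tao-experiments/data", "destination": "/workspace/tao-experiments/data"},
        {"source": "~/tao-experiments/results", "destination": "/workspace/tao-experiments/results"},
    ]
}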
def check_image_exists(docker_image):
"""Check if the image exists locally."""
check_command = '{} images | grep "\\<{}\\>" | grep "{}" >/dev/null 2>&1'.format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(check_command, stdout=sys.stderr, shell=True)
return rc == 0
def pull_base_container(docker_image):
"""Pull the default base container."""
pull_command = "{} pull {}:{}".format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(pull_command, stdout=sys.stderr, shell=True)
return rc == 0
def get_formatted_mounts(mount_file):
"""Simple function to get default mount points."""
default_mounts = get_docker_mounts_from_file(mount_file)
return format_mounts(default_mounts)
def check_mounts(formatted_mounts):
"""Check the formatted mount commands."""
assert type(formatted_mounts) == list
for mounts in formatted_mounts:
source_path = mounts.split(":")[0]
if not os.path.exists(source_path):
raise ValueError("Path doesn't exist: {}".format(source_path))
return True
def get_docker_gpus_prefix(gpus):
"""Get the docker command gpu's prefix."""
docker_version = (
subprocess.check_output(
["docker", "version", "--format={{ .Server.APIVersion }}"]
)
.strip()
.decode()
)
if LooseVersion(docker_version) >= LooseVersion("1.40"):
# You are using the latest version of docker using
# --gpus instead of the nvidia runtime.
gpu_string = "--gpus "
if gpus == "all":
gpu_string += "all"
else:
gpu_string += "\'\"device={}\"\'".format(gpus)
else:
# Stick to the older version of getting the gpu's using runtime=nvidia
gpu_string = "--runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=all "
if gpus != "none":
gpu_string += "-e NVIDIA_VISIBLE_DEVICES={}".format(gpus)
return gpu_string
def create_base_docker():
"""Function to create the base docker."""
create_command = "bash {}/docker/build.sh --build".format(ROOT_DIR)
try:
subprocess.run(create_command, stdout=sys.stderr, shell=True, check=True)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Container build failed with error {e}")
def instantiate_dev_docker(gpus, mount_file,
mount_cli_list,
env_var_list,
command, ulimit=None,
shm_size="16G", run_as_user=False,
port_mapping=None):
"""Instiate the docker container."""
docker_image = "{}/{}:{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY, DOCKER_TAG)
# Invoking the nvidia docker.
gpu_string = get_docker_gpus_prefix(gpus)
# Prefix for the run command.
run_command = "{} run -it --rm".format(DOCKER_COMMAND)
# get default mount points.
formatted_mounts = get_formatted_mounts(MOUNTS_PATH)
# get mounts from cli mount file.
formatted_mounts += get_formatted_mounts(mount_file)
if mount_cli_list is not None:
formatted_mounts.extend(mount_cli_list)
assert check_mounts(formatted_mounts), "Mounts don't exists, Please make sure the paths all exist."
mount_string = "-v {}:/workspace/tao-tf1 ".format(os.getenv("NV_TAO_TF_TOP", os.getcwd()))
# Defining env variables.
env_variables = "-e PYTHONPATH={}:$PYTHONPATH ".format("/workspace/tao-tf1")
for env in env_var_list:
if "=" not in env:
print("invalid env variable definition. skipping this {}".format(env))
continue
env_variables += "-e {} ".format(env)
for path in formatted_mounts:
mount_string += "-v {} ".format(path)
# Setting shared memory.
shm_option = "--shm-size {}".format(shm_size)
# Setting ulimits for host
ulimit_options = ""
if ulimit is not None:
for param in ulimit:
ulimit_options += "--ulimit {} ".format(param)
user_option = ""
if run_as_user:
user_option = "--user {}:{}".format(os.getuid(), os.getgid())
working_directory = "/workspace/tao-tf1"
working_dir_option = f"-w {working_directory}"
port_option = "--net=host"
if port_mapping:
port_option += f" -p {port_mapping}"
final_command = "{} {} {} {} {} {} {} {} {} {} {}".format(
run_command, gpu_string,
mount_string, env_variables,
shm_option, ulimit_options, user_option, working_dir_option,
port_option,
docker_image, " ".join(command)
)
print(final_command)
return subprocess.check_call(final_command, stdout=sys.stderr, shell=True)
def parse_cli_args(args=None):
"""Parse run container command line."""
parser = argparse.ArgumentParser(
prog="tao_tf1",
description="Tool to run the TAO Toolkit TensorFlow2 container.",
add_help=True)
parser.add_argument(
"--gpus",
default="all",
type=str,
help="Comma separated GPU indices to be exposed to the docker."
)
parser.add_argument(
"--volume",
action="append",
type=str,
default=[],
help="Volumes to bind."
)
parser.add_argument(
"--env",
action="append",
type=str,
default=[],
help="Environment variables to bind."
)
parser.add_argument(
"--mounts_file",
help="Path to the mounts file.",
default="",
type=str
)
parser.add_argument(
"--shm_size",
help="Shared memory size for docker",
default="16G",
type=str
)
parser.add_argument(
"--run_as_user",
help="Flag to run as user",
action="store_true",
default=False
)
parser.add_argument(
"--ulimit",
action='append',
help="Docker ulimits for the host machine."
)
parser.add_argument(
"--port",
type=str,
default=None,
help="Port mapping (e.g. 8889:8889)."
)
args = vars(parser.parse_args(args))
return args
def main(cl_args=None):
"""Start docker container."""
if "--" in cl_args:
index = cl_args.index("--")
# Split args to the tao docker wrapper and the command to be run inside the docker.
tao_tf_args = cl_args[:index]
command_args = cl_args[index + 1:]
else:
tao_tf_args = cl_args
command_args = ""
# parse command line args.
args = parse_cli_args(tao_tf_args)
docker_image = "{}/{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY)
if not check_image_exists(docker_image):
if not pull_base_container(docker_image):
print("The base container doesn't exist locally and the pull failed. Hence creating the base container")
create_base_docker()
try:
instantiate_dev_docker(
args["gpus"], args["mounts_file"],
args["volume"], args["env"],
command_args, args["ulimit"], args["shm_size"],
args["run_as_user"], args['port']
)
except subprocess.CalledProcessError:
# Do nothing - the errors are printed in entrypoint launch.
pass
if __name__ == "__main__":
main(sys.argv[1:])
| tao_tensorflow1_backend-main | runner/tao_tf.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""TAO Toolkit TF1 runner"""
| tao_tensorflow1_backend-main | runner/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA experimental module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | internal/__init__.py |
"""Init module to run the tflite conversion."""
import os
# Apply patch to keras if only the model is loaded with native keras
# and not tensorflow.keras.
if os.getenv("TF_KERAS", "0") != "1":
import third_party.keras.mixed_precision as MP
import third_party.keras.tensorflow_backend as TFB
MP.patch()
TFB.patch()
| tao_tensorflow1_backend-main | internal/tflite/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import sys
import tempfile
from nvidia_tao_tf1.core.export import keras_to_pb
from nvidia_tao_tf1.core.export._quantized import check_for_quantized_layers, process_quantized_layers
from nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS, model_io
import keras
from keras.utils import CustomObjectScope
import tensorflow as tf
logger = logging.getLogger(__name__)
def reset_keras(fn):
"""Simple function to define the keras decorator.
This decorator clears any previously existing sessions
and sets up a new session.
"""
def _fn_wrapper(*args, **kwargs):
"""Clear the keras session."""
keras.backend.clear_session()
set_keras_session()
keras.backend.set_learning_phase(0)
return fn(*args, **kwargs)
return _fn_wrapper
def set_keras_session():
"""Set the keras and Tensorflow sessions."""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.set_session(tf.Session(config=config))
@reset_keras
def load_model(model_path: str, key=""):
"""Load the keras model.
Args:
model_path(str): Path to the model.
key(str): The key to load the model.
"""
model = model_io(
model_path,
enc_key=key
)
return model
def resolve_path(path_string: str):
"""Simple function to resolve paths.
Args:
path_string (str): Path to model string.
"""
return os.path.abspath(os.path.expanduser(path_string))
def save_model(model, output_path: str):
"""Save the keras model.
Args:
model (keras.models.Model): Path to the keras model to be saved.
output_path (str): Path to save the model.
"""
with CustomObjectScope(CUSTOM_OBJS):
model.save(resolve_path(output_path))
def extract_model_scales(model,
backend: str = "onnx"):
"""Remove QDQ and Quantized* layers and extract the scales.
Args:
model (keras.model.Model): Model to inspect and extract scales.
backend (str): "onnx,uff" model backend.
"""
model, tensor_scale_dict = process_quantized_layers(
model, backend,
calib_cache=None,
calib_json=None)
logger.info(
"Extracting tensor scale: {tensor_scale_dict}".format(
tensor_scale_dict=tensor_scale_dict
)
)
logger.info("Extracting quantized scales")
os_handle, tmp_keras_model = tempfile.mkstemp(suffix=".hdf5")
os.close(os_handle)
with CustomObjectScope(CUSTOM_OBJS):
model.save(tmp_keras_model)
new_model = load_model(tmp_keras_model)
return new_model
def convert_to_pb(model, output_node_names=None):
"""Convert the model to graphdef protobuf.
Args:
model (keras.model.Model): Keras model object to serialize.
output_node_names (dict): Name of the output nodes of the model.
Returns:
tmp_pb_file (str): Path to the protobuf file containing tf.graphDef.
input_tensor_names (list): Names of the input tensors.
output_tensor_names (list): Name of the output tensors.
"""
os_handle, tmp_pb_file = tempfile.mkstemp(
suffix=".pb"
)
os.close(os_handle)
input_tensor_names, out_tensor_names, _ = keras_to_pb(
model,
tmp_pb_file,
None,
custom_objects=CUSTOM_OBJS
)
if output_node_names:
out_tensor_names = output_node_names
return tmp_pb_file, input_tensor_names, out_tensor_names
def parse_command_line(cl_args="None"):
"""Parse command line args."""
parser = argparse.ArgumentParser(
prog="export_tflite",
description="Export keras models to tflite."
)
parser.add_argument(
"--model_file",
type=str,
default="",
help="Path to a model file."
)
parser.add_argument(
"--key",
type=str,
default="",
help="Key to load the model."
)
parser.add_argument(
"--output_file",
type=str,
default="",
help="Path to the output model file."
)
args = vars(parser.parse_args(cl_args))
return args
def main(cl_args=None):
"""Model converter."""
# Convert the model
args = parse_command_line(cl_args=cl_args)
input_model_file = args["model_file"]
output_model_file = args["output_file"]
key = args["key"]
tensor_scale_dict = None
if not output_model_file:
output_model_file = f"{os.path.splitext(input_model_file)[0]}.tflite"
model = load_model(
input_model_file, key
)
quantized_model = check_for_quantized_layers(model)
logger.info("Quantized model: {quantized_model}".format(quantized_model=quantized_model))
if quantized_model:
model, tensor_scale_dict = extract_model_scales(
model, backend="onnx"
)
tensor_scale_file = os.path.join(
os.path.dirname(output_model_file),
"calib_scale.json"
)
with open(tensor_scale_file, "w") as scale_file:
json.dump(
tensor_scale_dict, scale_file, indent=4
)
graph_def_file, input_arrays, output_arrays = convert_to_pb(
model
)
# Convert the model to TFLite.
converter = tf.lite.TFLiteConverter.from_frozen_graph(
graph_def_file, input_arrays, output_arrays
)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
]
tflite_model = converter.convert()
with open(output_model_file, "wb") as tflite_file:
model_size = tflite_file.write(tflite_model)
print(
f"Output tflite model of size {model_size} bytes "
f"was written at {output_model_file}"
)
if __name__ == "__main__":
main(cl_args=sys.argv[1:])
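# A usage sketch (assumption: the file names are placeholders for an encrypted keras model
# and the desired tflite output). The converter can also be driven by passing an argv-style
# list straight to main().
def _example_convert(model_file="model.hdf5", key="", output_file="model.tflite"):
    """Hypothetical programmatic invocation of the tflite exporter."""
    main([
        "--model_file", model_file,
        "--key", key,
        "--output_file", output_file,
    ])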
| tao_tensorflow1_backend-main | internal/tflite/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA experiment export module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | internal/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to generate templates and save these models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tensorflow as tf
import graphsurgeon as gs
from keras import backend as K
from nvidia_tao_tf1.core.export import (
keras_to_pb,
pb_to_uff,
UFFEngineBuilder
)
from nvidia_tao_tf1.cv.makenet.utils.helper import model_io
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
import tensorrt as trt
TENSORRT_LOGGER = trt.Logger(trt.Logger.Severity.VERBOSE)
def main(cl_args=None):
"""Simple function to get tensorrt engine and UFF."""
args = parse_command_line(args=cl_args)
model_path = args['input_model']
key = str(args['key'])
output_pb = None
if args['output_pb'] is not None:
output_pb = args['output_pb']
else:
output_pb = "{}.pb".format(os.path.splitext(model_path)[0])
output_node_names = args['output_node_names'].split(',')
output_uff = "{}.uff".format(os.path.splitext(model_path)[0])
keras_model = model_io(model_path=model_path, enc_key=key)
keras_model.summary()
input_shape = list(keras_model.inputs[0].shape[1:])
input_dims = {"input_1": input_shape}
print("Converting keras to pb model.")
    keras_to_pb(keras_model, output_pb, output_node_names)
print("Inspecting pb model.")
inspect_pb(output_pb)
    pb_to_uff(output_pb, output_uff, output_node_names)
# Getting engine with TensorRT
UFFEngineBuilder(output_uff, "input_1", input_dims,
output_node_names, verbose=True, max_batch_size=16,
max_workspace_size=1<<30)
def inspect_pb(output_pb):
sg = gs.StaticGraph(output_pb)
for node in sg:
if "bn_conv1" in node.name:
print('Node: {}'.format(node.name))
def parse_command_line(args=None):
"""Parse command-line flags passed to the training script.
Returns:
Namespace with all parsed arguments.
"""
    parser = argparse.ArgumentParser(prog='generate_classifier',
                                     description='Convert a classification model to pb/UFF and build a TensorRT engine.')
parser.add_argument(
'-i',
'--input_model',
type=str,
required=True,
        help='Path to the input model file.')
parser.add_argument(
'--output_node_names',
type=str,
default="predictions/Softmax",
help='Names of output nodes.'
)
parser.add_argument(
'-o',
'--output_pb',
type=str,
default=None,
        help='Path to the output TensorFlow protobuf (.pb) file.'
)
parser.add_argument(
'-k',
'--key',
type=str,
default="",
help='Decryption key.'
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Include this flag in command line invocation for verbose logs.'
)
return vars(parser.parse_args(args))
if __name__=="__main__":
main()
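# A usage sketch (assumption: the file names and key are placeholders for an encrypted
# classification model). main() accepts an argv-style list mirroring the CLI flags above.
def _example_export(input_model="model.tlt", key="", output_pb="model.pb"):
    """Hypothetical programmatic invocation of the classifier exporter."""
    main(cl_args=[
        "--input_model", input_model,
        "--key", key,
        "--output_pb", output_pb,
        "--output_node_names", "predictions/Softmax",
    ])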
| tao_tensorflow1_backend-main | internal/export/generate_classifier.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export APIs as defined in maglev sdk."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import atexit
import logging
import os
import random
import sys
import struct
import tempfile
import numpy as np
from PIL import Image
import pycuda.driver as cuda
import pycuda.tools as tools
from nvidia_tao_tf1.core.export._tensorrt import CaffeEngineBuilder, Engine, UFFEngineBuilder
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.common.export.tensorfile_calibrator import \
TensorfileCalibrator as Calibrator
from nvidia_tao_tf1.cv.common.export.tensorfile import TensorFile
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.encoding import encoding
from tqdm import tqdm
DEFAULT_MAX_WORKSPACE_SIZE = 1 << 30
DEFAULT_MAX_BATCH_SIZE = 100
logger = logging.getLogger(__name__)
def parse_etlt_model(etlt_model, key):
"""Parse etlt model file.
Args:
etlt_model (str): path to the etlt model file.
key (str): String key to decode the model.
Returns:
uff_model (str): Path to the UFF model file."""
if not os.path.exists(expand_path(etlt_model)):
raise ValueError("Cannot find etlt file.")
os_handle, tmp_uff_file = tempfile.mkstemp()
os.close(os_handle)
# Unpack etlt file.
with open(expand_path(etlt_model), "rb") as efile:
num_chars = efile.read(4)
num_chars = struct.unpack("<i", num_chars)[0]
input_node = str(efile.read(num_chars))
with open(tmp_uff_file, "wb") as tfile:
encoding.decode(efile, tfile, key.encode())
logger.debug("Parsed ETLT model file.")
return tmp_uff_file, input_node
def calibrate_fm_caffe(caffe_prototxt,
caffe_caffemodel,
uff_model,
etlt_model,
key,
input_dims=None,
output_node_names=None,
dtype='int8',
max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
calibration_data_filename=None,
calibration_cache_filename=None,
calibration_n_batches=550,
calibration_batch_size=16,
parser='uff',
verbose=True,
trt_engine=True,
experiment_spec=None,
engine_serialize='engine.trt'):
"""Create a TensorRT engine out of a Keras model.
NOTE: the current Keras session is cleared in this function.
Do not use this function during training.
Args:
caffe_prototxt: (str) prototxt for caffe generated model.
caffe_caffemodel: (str) caffemodel for caffe generated model.
        input_dims (list or dict): list of input dimensions, or a dictionary of
input_node_name:input_dims pairs in the case of multiple inputs.
output_node_names (list of str): list of model output node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
If not provided, then the last layer is assumed to be the output node.
max_workspace_size (int): maximum TensorRT workspace size.
max_batch_size (int): maximum TensorRT batch size.
        calibration_data_filename (str): calibration data file to use.
calibration_cache_filename (str): calibration cache file to write to.
calibration_n_batches (int): number of calibration batches.
calibration_batch_size (int): calibration batch size.
parser='uff' (str): parser ('uff' or 'caffe') to use for intermediate representation.
verbose (bool): whether to turn ON verbose messages.
Returns:
        input_node_name (str): Name of the input node.
        output_node_names (list of str): Names of the output nodes.
        engine (Engine): The built (and, for int8, calibrated) TensorRT engine.
"""
if dtype == 'int8':
if calibration_data_filename is not None:
logger.info("Setting up calibrator")
logger.info("Calibrator parameters: nbatches = {}, batch_size = {}".format(calibration_n_batches,
calibration_batch_size
))
calibrator = Calibrator(data_filename=calibration_data_filename,
cache_filename=calibration_cache_filename,
n_batches=calibration_n_batches,
batch_size=calibration_batch_size)
else:
raise ValueError(
"A valid calibration data filename or experiment spec file "
"was required."
)
else:
calibrator = None
input_node_name = 'input_1'
logger.info("Instantiated the calibrator")
# Define model parser and corresponding engine builder.
if parser == "caffe":
assert os.path.exists(expand_path(caffe_caffemodel)), (
"Caffemodel not found at {caffe_caffemodel}"
)
assert os.path.exists(expand_path(caffe_prototxt)), (
"Prototxt model not found at {caffe_prototxt}"
)
logger.info(
"Positional_args: {prototxt}, {caffemodel}, {dims}".format(
caffemodel=caffe_caffemodel, prototxt=caffe_prototxt,
dims=input_dims
))
builder = CaffeEngineBuilder(caffe_prototxt,
caffe_caffemodel,
input_node_name,
input_dims,
output_node_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=dtype,
verbose=verbose,
calibrator=calibrator)
elif parser == "uff":
if not isinstance(input_dims, dict):
input_dims = {input_node_name: input_dims}
builder = UFFEngineBuilder(uff_model,
input_node_name,
input_dims,
output_node_names=output_node_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=dtype,
verbose=verbose,
calibrator=calibrator)
elif parser == "etlt":
tmp_uff_model, _ = parse_etlt_model(etlt_model, key)
if not isinstance(input_dims, dict):
input_dims = {input_node_name: input_dims}
logger.info(f"input_dims: {input_dims}")
builder = UFFEngineBuilder(tmp_uff_model,
input_node_name,
input_dims,
output_node_names=output_node_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=dtype,
verbose=verbose,
calibrator=calibrator)
else:
raise ValueError("Parser format not supported: {}".format(parser))
engine = Engine(builder.get_engine())
# write engine to file.
if trt_engine:
with open(expand_path(engine_serialize), 'wb') as efile:
efile.write(engine._engine.serialize())
efile.closed
return input_node_name, output_node_names, engine
def clean_up(ctx):
"Clear up context at exit."
ctx.pop()
tools.clear_context_caches()
logger.info("Exiting execution. Thank you for using the calibrator.")
def prepare_chunk(image_ids, image_list,
image_width=960,
image_height=544,
channels=3,
scale=(1 / 255.),
batch_size=8):
"Create a chunk for data for data for data dump to a tensorfile."
dump_placeholder = np.zeros((batch_size, channels, image_height, image_width))
    for i in range(len(image_ids)):
idx = image_ids[i]
resized_image = Image.open(image_list[idx]).resize((image_width, image_height), Image.ANTIALIAS)
dump_input = np.asarray(resized_image).astype(np.float32).transpose(2, 0, 1) * scale
dump_placeholder[i, :, :, :] = dump_input
return dump_placeholder
def create_data_dump(input_image_dir,
calibration_data_filename,
batch_size=16,
calibration_n_batches=500,
image_height=544,
image_width=960,
channels=3,
random_data=False):
"Create a data dump tensorfile for calibration."
# If random data then just dump random samples.
if random_data:
# Writing random dump file.
with TensorFile(calibration_data_filename, 'w') as f:
            for _ in tqdm(range(calibration_n_batches)):
                f.write(np.random.sample((batch_size,
                                           channels,
                                           image_height,
                                           image_width)))
f.closed
else:
# Else create dump from a directory of images.
if not os.path.isdir(expand_path(input_image_dir)):
raise ValueError("Need an valid image_dir for creating image_dump: {}".format(input_image_dir))
num_images = calibration_n_batches * batch_size
if os.path.is_dir(input_image_dir):
image_idx = random.sample(xrange(len([item for item in os.listdir(expand_path(input_image_dir))
if item.endswith('.jpg')])), num_images)
image_list = [expand_path(f"{input_image_dir}/{image}") for image in os.listdir(expand_path(input_image_dir))
if image.endswith('.jpg')]
# Writing out processed dump.
with TensorFile(calibration_data_filename, 'w') as f:
            for chunk in tqdm(image_idx[x:x+batch_size] for x in range(0, len(image_idx), batch_size)):
dump_data = prepare_chunk(chunk, image_list, batch_size=batch_size)
f.write(dump_data)
f.closed
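# A usage sketch (assumption: the paths are placeholders for a folder of .jpg frames and
# the tensorfile to be written) showing a typical call into create_data_dump().
def _example_calibration_dump(image_dir="/data/frames",
                              tensorfile="calibration.tensorfile"):
    """Hypothetical illustration of writing a small calibration tensorfile."""
    create_data_dump(
        image_dir,
        tensorfile,
        batch_size=16,
        calibration_n_batches=10,
        image_height=544,
        image_width=960,
        channels=3,
        random_data=False,
    )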
def parse_command_line():
'''
Parse command line arguments
'''
parser = argparse.ArgumentParser(description='Int8 calibration table generator for Caffe/UFF models.')
parser.add_argument('-d',
'--data_file_name',
help='The tensorrt calibration tensor file')
parser.add_argument('-o',
'--output_node_names',
help='Comma separated node names to be marked as output blobs of the model.',
default='conv2d_cov/Sigmoid,conv2d_bbox')
parser.add_argument('-p',
'--prototxt',
help='caffe inference prototxt file',
default=None)
parser.add_argument('-c',
'--caffemodel',
help='caffe inference caffemodel file',
default=None)
parser.add_argument('-u',
'--uff_model',
default=None,
help="Path to uff model file.")
parser.add_argument('-bs',
'--batch_size',
help='Inference batch size, default=1',
type=int,
default=16)
parser.add_argument('-b',
'--calibration_n_batches',
help="Flag to enable kitti dump",
type=int,
default=100)
parser.add_argument('--input_dims',
nargs='+',
default=[3, 544, 960],
help='Input dimension. Forced to c h w',
type=int)
parser.add_argument('--parser',
default='caffe',
                        help='Model parser to be called. One of "caffe", "uff" or "etlt".')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Logger verbosity level. ')
parser.add_argument('--cache',
default=os.path.join(os.getcwd(),'cal_table.txt'),
help='Output calibration cache file')
parser.add_argument('--create_dump',
action='store_true',
help='Create calibrator tensorfile from directory of images')
parser.add_argument('--input_image_dir',
default=None,
help='Directory of input images to create a tensorfile',
type=str)
parser.add_argument('--experiment_spec',
default=None,
help="Experiment spec file used to train the model.",
type=str)
parser.add_argument('-g',
'--gpu_id',
default=0,
type=int,
help='Index of the GPU to work on')
parser.add_argument("--etlt_model",
default=None,
type=str,
help="Path to the etlt model file.")
parser.add_argument("--key",
default="tlt_encode",
help="Key to decode etlt model.",
default="",
type=str)
parser.add_argument('--random_data',
action='store_true',
help="Calibrate on random data.")
parser.add_argument('--trt_engine',
help='Save pre compiled trt engine',
action='store_true')
parser.add_argument('--engine',
help='Path to save trt engine',
default=os.path.join(os.getcwd(), 'engine.trt'),
type=str)
args = parser.parse_args()
return args
def main():
'''Main wrapper to generate calibration table from a pretrained caffe model.'''
# Creating a cuda session.
cuda.init()
current_dev = cuda.Device(0)
ctx = current_dev.make_context()
ctx.push()
# Parse parameters.
args = parse_command_line()
prototxt = args.prototxt
caffemodel = args.caffemodel
uff_model = args.uff_model
etlt_model = args.etlt_model
key = args.key
trt_parser = args.parser
calibration_data_filename = args.data_file_name
calibration_cache_filename= args.cache
output_node_names = args.output_node_names.split(',')
(channels, image_height, image_width) = tuple(args.input_dims)
batch_size = args.batch_size
calibration_n_batches = args.calibration_n_batches
input_image_dir = args.input_image_dir
experiment_spec = args.experiment_spec
# Defining logger configuration.
verbosity = 'INFO'
verbose = args.verbose
if verbose:
verbosity = 'DEBUG'
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
logger.info("Shape: {} {} {}".format(channels, image_height, image_width))
# Create a data dump file for calibration.
if args.create_dump:
logger.info("Creating data dump file for calibration process")
create_data_dump(
input_image_dir,
calibration_data_filename,
random_data=args.random_data,
batch_size=batch_size,
calibration_n_batches=calibration_n_batches,
image_height=image_height,
image_width=image_width,
channels=channels
)
# Calibrate the model.
input_node_name, output_node_names, engine = calibrate_fm_caffe(
prototxt, caffemodel, uff_model, etlt_model, key,
input_dims=tuple(args.input_dims),
calibration_data_filename=calibration_data_filename,
calibration_cache_filename=calibration_cache_filename,
output_node_names=output_node_names,
calibration_batch_size=batch_size,
calibration_n_batches=args.calibration_n_batches,
trt_engine=args.trt_engine,
engine_serialize=args.engine,
parser=trt_parser,
experiment_spec=experiment_spec,
verbose=verbose
)
del engine
# Kill context at exit
atexit.register(clean_up, ctx)
if __name__=="__main__":
main()
| tao_tensorflow1_backend-main | internal/export/calibrator_app.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""Module containing implementation of release packaging."""
| tao_tensorflow1_backend-main | release/__init__.py |
import argparse
import glob
import os
import subprocess
import sys
from ci.utils import CI
from nvidia_tao_tf1.core.utils.path_utils import expand_path
def execute_command(command_args):
"""Execute the shell command."""
for command in command_args:
try:
subprocess.call(
command,
shell=True,
stdout=sys.stdout
)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
f"Command {command} failed with error {exc}") from exc
def parse_command_line(cl_args=None):
"""Parse builder command line"""
parser = argparse.ArgumentParser(
prog="build_kernel",
description="Build TAO custom ops."
)
parser.add_argument(
"--force",
action="store_true",
default=False,
help="Force build the kernel."
)
parser.add_argument(
"--op_names",
type=str,
nargs="+",
help="The name of the op to build.",
default="all"
)
return vars(parser.parse_args(cl_args))
def list_op_libraries(path, op_names):
"""List of library paths."""
if os.path.isdir(expand_path(path)):
op_list = [
item for item in os.listdir(expand_path(path)) if
os.path.isdir(
os.path.join(path, item)
)
]
if op_names == "all":
return op_list
return [item for item in op_list if item in op_names]
def build_ops(op_list, path, force=False):
"""Build custom ops."""
prev_dir = os.getcwd()
for op in op_list:
print(f"Building op {op}")
build_command = []
if force:
build_command.append("make clean")
build_command.append("make")
op_path = os.path.join(path, op)
if os.path.isdir(op_path):
os.chdir(op_path)
if not os.path.exists(os.path.join(op_path, "Makefile")):
continue
execute_command(build_command)
assert os.path.isfile(os.path.join(op_path, f"../op_{op}.so")), (
f"\'{op}\' build failed."
)
os.chdir(prev_dir)
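# A usage sketch (assumption: the lib path and op name are hypothetical examples) showing
# how list_op_libraries() and build_ops() combine outside of main().
def _example_build(lib_path="nvidia_tao_tf1/cv/ssd/lib", op_names=("nms",)):
    """Hypothetical illustration of force-building a single custom op."""
    op_list = list_op_libraries(lib_path, list(op_names))
    build_ops(op_list, lib_path, force=True)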
def main(cl_args=sys.argv[1:]):
"""Run kernel builder."""
args = parse_command_line(cl_args=cl_args)
force_build = args["force"]
op_names = args["op_names"]
env_var = "NV_TAO_TF_TOP"
if CI:
env_var = "CI_PROJECT_DIR"
if "WORKSPACE" not in os.environ.keys():
os.environ["WORKSPACE"] = os.getenv(env_var, "/workspace/tao-tf1")
h_path = os.getenv(env_var, "/workspace/tao-tf1")
glob_string = os.path.abspath(os.path.expanduser(f"{h_path}/nvidia_tao_tf1/**/*/lib"))
kernel_lib_root = glob.glob(
glob_string,
recursive=True
)
for lib_path in kernel_lib_root:
op_list = list_op_libraries(lib_path, op_names)
build_ops(op_list, lib_path, force=force_build)
if __name__ == "__main__":
main() | tao_tensorflow1_backend-main | release/docker/build_kernels.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""Version string for the TAO Toolkit TF2 AI models/tasks."""
MAJOR = "5"
MINOR = "0.0"
PATCH = "01"
PRE_RELEASE = ''
# Getting the build number.
def get_build_info():
"""Get the build version number."""
# required since setup.py runs a version string and global imports aren't executed.
import os # noqa pylint: disable=import-outside-toplevel
build_file = "build.info"
if not os.path.exists(build_file):
raise FileNotFoundError("Build file doesn't exist.")
patch = 0
with open(build_file, 'r') as bfile:
patch = bfile.read().strip()
assert bfile.closed, "Build file wasn't closed properly."
return patch
try:
PATCH = get_build_info()
except FileNotFoundError:
pass
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Version of the library.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
# Version of the file format.
__format_version__ = 2
# Other package info.
__package_name__ = "nvidia-tao-tf1"
__description__ = "NVIDIA's package for DNN implementation on TensorFlow 1.0 for use with TAO Toolkit."
__keywords__ = "nvidia, tao, tf1"
__contact_names__ = "Varun Praveen"
__contact_emails__ = "[email protected]"
__license__ = "Apache 2.0 License"
| tao_tensorflow1_backend-main | release/python/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Packaging modules for TAO Toolkit.""" | tao_tensorflow1_backend-main | release/python/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing utility functions required for packaging TAO Toolkit modules."""
| tao_tensorflow1_backend-main | release/python/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for packaging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import setuptools
# Rename all .py files to .py_tmp temporarily.
ignore_list = ['__init__.py', '__version__.py']
LOCAL_DIR = os.path.dirname(os.path.abspath(__file__))
def up_directory(dir_path, n=1):
"""Go up n directories from dir_path."""
dir_up = dir_path
for _ in range(n):
dir_up = os.path.split(dir_up)[0]
return dir_up
TOP_LEVEL_DIR = up_directory(LOCAL_DIR, 3)
def remove_prefix(dir_path):
"""Remove a certain prefix from path."""
max_path = 8
prefix = dir_path
while max_path > 0:
prefix = os.path.split(prefix)[0]
if prefix.endswith('ai_infra'):
return dir_path[len(prefix) + 1:]
max_path -= 1
return dir_path
def get_subdirs(path):
"""Get all subdirs of given path."""
dirs = os.walk(path)
return [remove_prefix(x[0]) for x in dirs]
def rename_py_files(path, ext, new_ext, ignore_files):
"""Rename all .ext files in a path to .new_ext except __init__ files."""
files = glob.glob(path + '/*' + ext)
for ignore_file in ignore_files:
files = [f for f in files if ignore_file not in f]
for filename in files:
os.rename(filename, filename.replace(ext, new_ext))
def get_version_details():
"""Simple function to get packages for setup.py."""
# Define env paths.
LAUNCHER_SDK_PATH = os.path.join(TOP_LEVEL_DIR, "release/python")
# Get current __version__.
version_locals = {}
with open(os.path.join(LAUNCHER_SDK_PATH, 'version.py')) as version_file:
exec(version_file.read(), {}, version_locals)
return version_locals
def cleanup():
"""Cleanup directories after the build process."""
req_subdirs = get_subdirs(TOP_LEVEL_DIR)
# Cleanup. Rename all .py_tmp files back to .py and delete pyc files
for dir_path in req_subdirs:
dir_path = os.path.join(TOP_LEVEL_DIR, dir_path)
# TODO: @vpraveen Think about removing python files before the final
# release.
rename_py_files(dir_path, '.py_tmp', '.py', ignore_list)
pyc_list = glob.glob(dir_path + '/*.pyc')
for pyc_file in pyc_list:
os.remove(pyc_file)
def find_packages(package_name):
"""List of packages.
Args:
package_name (str): Name of the package.
Returns:
packages (list): List of packages.
"""
packages = setuptools.find_packages(package_name)
packages = [f"{package_name}.{f}" for f in packages]
packages.append(package_name)
return packages
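# A usage sketch (assumption: mirrors how the release setup.py consumes these helpers).
def _example_collect_packages():
    """Hypothetical illustration of gathering version metadata and package lists."""
    version_info = get_version_details()
    packages = []
    for name in ("nvidia_tao_tf1", "third_party"):
        packages.extend(find_packages(name))
    return version_info["__version__"], packages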
| tao_tensorflow1_backend-main | release/python/utils/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO TF1 root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/__init__.py |
"TAO module containing blocks."
from nvidia_tao_tf1.blocks import dataloader
from nvidia_tao_tf1.blocks import learning_rate_schedules
from nvidia_tao_tf1.blocks import losses
from nvidia_tao_tf1.blocks import models
from nvidia_tao_tf1.blocks import optimizers
from nvidia_tao_tf1.blocks import trainer
__all__ = (
"dataloader",
"learning_rate_schedules",
"losses",
"models",
"optimizers",
"trainer",
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for ingesting training data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import multiprocessing
import os
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader import processors
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
FEATURE_CAMERA,
TransformedExample,
)
from nvidia_tao_tf1.blocks.trainer.data_loader_interface import (
DataLoaderInterface,
)
from nvidia_tao_tf1.core.coreobject import save_args
logger = logging.getLogger(__name__)
MAX_SHUFFLE_BUFFER = 10000
def _normalize_images(example, dtype=tf.float32):
"""Cast uint8 jpeg/png images to dtype and normalize it into the range [0 , 1].
Args:
example (Example or TransformedExample): The example that contains images.
dtype (tf.dtypes.DType): The dtype that the images are cast to.
"""
camera = example.instances[FEATURE_CAMERA]
images = 1.0 / 255 * tf.cast(camera.images, dtype)
example.instances[FEATURE_CAMERA] = camera._replace(images=images)
def _pick_largest_image_dtype(data_sources):
"""Pick a image_dtype for a list of data_source.
The policy is that when a list of data_source with mixed dtypes are given,
the dtype with highest precision is picked.
Args:
data_sources (list<data_source>): A list of data_sources.
Return:
(tf.dtypes.Dtype) The picked datatype.
"""
sorted_data_sources = sorted(
data_sources, key=lambda data_source: data_source.image_dtype.size
)
if (
sorted_data_sources[0].image_dtype.size
!= sorted_data_sources[-1].image_dtype.size
):
logger.warning(
"Warning: Data sources are not with the same dtype, might result in reduced perf."
"For example: dtype {} will be casted to dtype {}".format(
sorted_data_sources[0].image_dtype.name,
sorted_data_sources[-1].image_dtype.name,
)
)
return sorted_data_sources[-1].image_dtype
class DataLoader(DataLoaderInterface):
"""Functor for feeding data into Estimators."""
@save_args
def __init__(
self,
data_sources,
augmentation_pipeline,
batch_size=None,
batch_size_per_gpu=None,
shuffle=True,
preprocessing=None,
pipeline_dtype=None,
pseudo_sharding=False,
repeat=True,
sampling="user_defined",
serial_augmentation=False,
label_names=None,
):
"""
Construct an input pipeline.
Args:
data_sources (list): Each element is a ``DataSource`` to read examples from.
augmentation_pipeline (Pipeline or list[Processor]): Transformations that get applied
to examples from all data sources.
batch_size (int): Number of examples to batch together.
If not set, batch_size_per_gpu should be set.
batch_size_per_gpu (int): Number of examples per gpu to batch together.
                If not set, batch_size should be set.
shuffle (bool): If True, data will be shuffled.
preprocessing (list<Processor>): Processors for preprocessing all sources. If no
temporal batching processor is included, one is automatically added to the list
to ensure DataLoader output always includes a time dimension.
NOTE: Defaults to None for backwards compatibility with DataSources that implement
temporal batching (i.e. produce 4D images.)
            pipeline_dtype (str): Feature tensors (e.g. images) are converted to this
                dtype for processing. Defaults to None, in which case the dtype with the
                highest precision among the data sources is used.
pseudo_sharding (bool): If True, then data is not actually sharded, but different
shuffle seeds are used to differentiate shard batches.
repeat (bool): Whether or not this DataLoader iterates over its contents ad vitam
aeternam.
sampling (str): A sampling strategy for how to sample the individual data sources.
Accepted values are:
'user_defined': Use the sample_ratio field of each data source. This is the
default behavior.
'uniform': The equivalent of every data source's sampling ratio being 1., i.e.
each data source is equally likely to be the one producing the next sample.
'proportional': The sample ratio of each data source is proportional to the
number of samples within each data source.
`None`: No sampling is applied. Instead, the data sources are all concatenated
to form a single dataset. This should be the default when iterating on
a validation / test dataset, as we would like to see each sample exactly
once.
!!! NOTE !!!: Do not use this in conjunction with `repeat=True`.
serial_augmentation(bool): Whether to apply augmentation in serial to aid in
reproducibility. Default is False which means augmentations would be applied in
parallel.
label_names (set<str>): Set of label names produced by this data loader.
Raises:
ValueError: When no ``data_sources`` are provided.
"""
super(DataLoader, self).__init__()
if not data_sources:
raise ValueError("DataLoader excepts at least one element in data_sources.")
if sampling not in ["user_defined", "uniform", "proportional", None]:
raise ValueError("Unsupported sampling %s" % sampling)
self._data_sources = data_sources
self._augmentation_pipeline = augmentation_pipeline
self._pipeline_dtype = pipeline_dtype
# Note, currently we are supporting both batch size for all workers (batch_size)
# and batch size per gpu (batch_size_per_gpu). Only one of them can be set.
# TODO (weich) remove batch_size support and fully switch to batch_size_per_gpu.
if not ((batch_size is None) ^ (batch_size_per_gpu is None)):
raise ValueError(
"Exactly one of batch_size and batch_size_per_gpu must be set."
)
if batch_size is not None and batch_size <= 0:
raise ValueError("batch_size must be positive.")
if batch_size_per_gpu is not None and batch_size_per_gpu <= 0:
raise ValueError("batch_size_per_gpu must be positive.")
self._batch_size = batch_size
self._batch_size_per_gpu = batch_size_per_gpu
self._shuffle = shuffle
self._shard_count = 1
self._shard_index = 0
self._sharding_configured = False
self._pseudo_sharding = pseudo_sharding
self._serial_augmentation = serial_augmentation
self._label_names = label_names or set()
logger.info("Serial augmentation enabled = {}".format(serial_augmentation))
self._sampling = sampling
logger.info("Pseudo sharding enabled = {}".format(pseudo_sharding))
self._temporal_size = None
# TODO(vkallioniemi): Default to always adding a temporal batcher if it is missing. Not
# adding it when preprocessing is None is a temporary solution that ensures that code
# that is not using geometric primitives keeps on working.
if preprocessing is None:
preprocessing = []
else:
has_temporal_batcher = False
for processor in preprocessing:
if isinstance(processor, processors.TemporalBatcher):
self._temporal_size = processor.size
has_temporal_batcher = True
if not has_temporal_batcher:
# Ensure outputs always include a temporal dimension because preprocessing,
# augmentation and models expect that.
self._temporal_size = 1
preprocessing = [processors.TemporalBatcher()] + preprocessing
self._preprocessing = preprocessing
self._repeat = repeat
@property
def steps(self):
"""Return the number of steps."""
if not self._sharding_configured:
raise ValueError(
"After constructing the DataLoader `set_shard()` method needs to "
"be called to set the sharding arguments and configure the sources."
)
size = len(self)
# Assign each worker a fraction of total steps,
# because training data is evenly sharded among them.
sharded_size = int(math.ceil(size / self._shard_count))
return int(math.ceil(sharded_size / self._batch_size_per_gpu))
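    # Worked example (illustrative): 128 examples sharded across 4 GPUs gives
    # ceil(128 / 4) = 32 examples per shard; with batch_size_per_gpu=8 this is
    # ceil(32 / 8) = 4 steps per epoch on each shard.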
@property
def batch_size_per_gpu(self):
"""Return the number of examples each batch contains."""
return self._batch_size_per_gpu
@property
def label_names(self):
"""Gets the label names produced by this dataloader."""
return self._label_names
def __len__(self):
"""Return the total number of examples that will be produced."""
if not self._sharding_configured:
raise ValueError(
"After constructing the DataLoader `set_shard()` method needs to "
"be called to set the sharding arguments and configure the sources."
)
num_examples = 0
for source in self._data_sources:
if source.supports_temporal_batching():
num_examples += len(source)
else:
num_examples += len(source) // (self._temporal_size or 1)
return num_examples
def _configure_sources(self):
all_max_dims = None
for source in self._data_sources:
if source.supports_sharding():
# source handles its own sharding
source.set_shard(
self._shard_count, self._shard_index, self._pseudo_sharding
)
if source.supports_shuffling() and self._shuffle:
source.set_shuffle(MAX_SHUFFLE_BUFFER)
source.set_sequence_length(self._temporal_size)
source.initialize()
# Verify that all of the input sources are temporal-compatible given the settings
if (
self._temporal_size
and len(source) % self._temporal_size != 0
and not source.supports_temporal_batching()
):
raise ValueError(
"All datasources must have a number of samples divisible by "
"the temporal sequence length. Sequence Length: {}. "
"Invalid source: {}".format(self._temporal_size, source)
)
max_dims = source.get_image_properties()
if not all_max_dims:
all_max_dims = max_dims
else:
all_max_dims = [max(a, b) for a, b in zip(all_max_dims, max_dims)]
logger.info("Max Image Dimensions (all sources): {}".format(all_max_dims))
for source in self._data_sources:
source.set_image_properties(*all_max_dims)
# Handle sampling ratios here. Note that the default of 'user_defined' need not be
# addressed since it'll resolve to using what each data_source was configured with
# in terms of sample_ratio.
if self._sampling == "uniform":
for source in self._data_sources:
source.sample_ratio = 1.0
elif self._sampling == "proportional":
for source in self._data_sources:
source.sample_ratio = float(len(source))
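        # Illustrative example: with 'proportional' sampling and two sources holding 100
        # and 300 examples, the ratios become 100.0 and 300.0 here and are normalized to
        # 0.25 and 0.75 when the datasets are sampled in call().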
def set_shard(self, shard_count=1, shard_index=0):
"""
Configure the sharding for the current job.
Args:
shard_count (int): Number of shards that each dataset will be split into.
shard_index (int): Index of shard to use [0, shard_count-1].
"""
if shard_count < 1:
raise ValueError("at least 1 shard is needed")
if shard_index < 0 or shard_index >= shard_count:
raise ValueError("shard_index must be between 0 and shard_count-1")
self._shard_count = shard_count
self._shard_index = shard_index
if self._batch_size_per_gpu is None:
# Compute batch_size_per_gpu of training for each process when using multi-GPU
batch_size_per_gpu, remainder = divmod(self._batch_size, shard_count)
if remainder != 0:
raise ValueError(
"Cannot evenly distribute a batch size of {} over {} "
"processors".format(self._batch_size, shard_count)
)
self._batch_size_per_gpu = batch_size_per_gpu
self._configure_sources()
self._sharding_configured = True
def summary(self, print_fn=None):
"""
Print a summary of the contents of this data loader.
Args:
print_fn (function): Optional function that each line of the summary will be passed to.
Prints to stdout if not specified.
"""
if print_fn is None:
print_fn = print
print_fn(" - examples: {}".format(len(self)))
print_fn(" - steps: {}".format(self.steps))
print_fn(" - batch size per gpu: {}".format(self.batch_size_per_gpu))
print_fn(" - shuffle: {}".format(self._shuffle))
print_fn(" - shard count: {}".format(self._shard_count))
print_fn(" - shard index: {}".format(self._shard_index))
print_fn(" - pseudo-sharding: {}".format(self._pseudo_sharding))
print_fn(" - serial augmentation: {}".format(self._serial_augmentation))
print_fn(" - sources:")
def indented_print_fn(string):
print_fn(" " + string)
for i, source in enumerate(self._data_sources):
indented_print_fn("Source {}: {}".format(i, type(source).__name__))
source.summary(print_fn=indented_print_fn)
if self._preprocessing:
print_fn(" - preprocessing:")
for processor in self._preprocessing:
indented_print_fn(" - {}".format(str(processor)))
if self._augmentation_pipeline:
print_fn(" - augmentations:")
for augmentation in self._augmentation_pipeline:
indented_print_fn(" - {}".format(str(augmentation)))
def call(self):
"""Produce examples with input features (such as images) and labels.
Returns:
examples (Example / SequenceExample): Example structure containing tf.Tensor that have
had augmentation applied to them.
"""
if self._pipeline_dtype:
image_loading_dtype = tf.dtypes.as_dtype(self._pipeline_dtype)
else:
image_loading_dtype = _pick_largest_image_dtype(self._data_sources).name
# TODO (vkallioniemi): refactor to move batching to sources.
# See [performance guideline](https://www.tensorflow.org/performance/datasets_performance)
# before making changes here.
core_count = multiprocessing.cpu_count()
        # Roughly tuned on pathnet, 8 GPUs, against 469735 images, roughly half of which were png
        # and half were fp16.
        # Previously this field was configurable; we discovered that using 1 IO thread was never
        # better, and it is strictly much worse on AVDC.
# TODO(@williamz): When multi-node is used, base shard count and cpu count on the local
# node.
io_threads = max(2 * (core_count // self._shard_count), 1)
if self._serial_augmentation:
compute_threads = 1
else:
compute_threads = core_count // self._shard_count
# For a typical batch of 32 images sized 604x960x3, the memory requirements are:
# 1 buffered batch: 32 * 3MB ~= 100MB.
buffered_batches = 4
logger.info(
"number of cpus: %d, io threads: %d, compute threads: %d, buffered batches: %d",
core_count,
io_threads,
compute_threads,
buffered_batches,
)
logger.info(
"total dataset size %d, number of sources: %d, batch size per gpu: %d, steps: %d",
len(self),
len(self._data_sources),
self.batch_size_per_gpu,
self.steps,
)
# TODO(@williamz): Break this method up into smaller functional pieces.
datasets = []
weights = []
for source in self._data_sources:
dataset = source()
if not source:
logger.warning("skipping empty datasource")
continue
# TODO(vkallioniemi): Remove this if statement once all sources have been changed
# to parse examples by default.
if source.parse_example is not None:
dataset = dataset.apply(source.parse_example)
logger.info(
"shuffle: %s - shard %d of %d",
self._shuffle,
self._shard_index,
self._shard_count,
)
# Apply temporal batching and other global preprocessing.
# NOTE: This needs to be performed before sharding because operations such as
# temporal batching require examples to be chunked together.
for processor in self._preprocessing:
                # If the source itself handles temporal batching (like sqlite), we do not apply
                # the temporal batcher here.
if source.supports_temporal_batching():
if isinstance(processor, processors.TemporalBatcher):
continue
dataset = dataset.apply(processor)
# Evenly distribute records from each dataset to each GPU.
if self._shard_count != 1:
if not source.supports_sharding() and not self._pseudo_sharding:
dataset = dataset.shard(self._shard_count, self._shard_index)
if self._shuffle:
if not source.supports_shuffling():
dataset = dataset.apply(
tf.data.experimental.shuffle_and_repeat(
buffer_size=min(len(source), MAX_SHUFFLE_BUFFER), count=None
)
)
elif self._repeat:
# NOTE (@williamz): this line seems to imply the tf.data.Dataset object
# produced by the source handles the repeat() call in a way that does not
# result in the order being the same at every iteration over the entire dataset.
# This needs to be investigated properly for the sqlite data source.
dataset = dataset.repeat()
elif self._repeat:
dataset = dataset.repeat()
# Combine pipelines so that we do not lose information when affine
# transforms get applied at the end of a pipeline.
# The processors that are not transform processors need to be applied later.
# They require the images to be loaded.
delayed_processors = []
transform_processors = []
if isinstance(source.preprocessing, list):
# Source processors to apply specified as a list.
pipeline = processors.Pipeline(source.preprocessing)
else:
pipeline = source.preprocessing
if isinstance(self._augmentation_pipeline, list):
# Processors to apply specified as a list.
for processor in self._augmentation_pipeline:
if not isinstance(processor, processors.TransformProcessor):
delayed_processors.append(processor)
else:
transform_processors.append(processor)
augmentation_pipeline = processors.Pipeline(transform_processors)
pipeline += augmentation_pipeline
else:
pipeline += self._augmentation_pipeline
if pipeline:
dataset = dataset.map(pipeline, num_parallel_calls=compute_threads)
datasets.append(dataset)
weights.append(source.sample_ratio)
if self._sampling is not None:
total = sum(weights)
weights = [weight / total for weight in weights]
logger.info("sampling %d datasets with weights:", len(datasets))
for index, weight in enumerate(weights):
logger.info("source: %d weight: %f", index, weight)
combined = tf.data.experimental.sample_from_datasets(
datasets, weights=weights
)
else:
combined = datasets[0]
for dataset in datasets[1:]:
combined = combined.concatenate(dataset)
# Note: increasing parallelism will increase memory usage.
combined = combined.map(
processors.AssetLoader(output_dtype=image_loading_dtype),
num_parallel_calls=io_threads,
)
if delayed_processors:
delayed_pipeline = processors.Pipeline(delayed_processors)
else:
delayed_pipeline = processors.Pipeline([])
# Drop remainder since some downstream users rely on known batch size, specifically
# weighted_binary_crossentropy.
# TODO(ehall): Fix loss function to be able to handle variable sized batches.
# TODO(vkallioniemi): consider batching per dataset instead.
combined = combined.batch(self._batch_size_per_gpu, drop_remainder=True)
# Buffer more data here to pipeline CPU-GPU processing.
combined = combined.prefetch(buffered_batches)
if tf.executing_eagerly():
batch = combined
else:
            # Take one item from the dataloader and reuse it to serve the rest of the pipeline.
# This feature is used to benchmark the training pipeline with all data in memory.
# !!! Do not enable it in production.
if os.environ.get("MODULUS_DATALOADER_BYPASS_LOADING"):
combined = combined.take(1)
combined = combined.cache()
combined = combined.repeat()
iterator = tf.compat.v1.data.make_initializable_iterator(combined)
# So it can be easily initialized downstream.
tf.compat.v1.add_to_collection("iterator_init", iterator.initializer)
batch = iterator.get_next(name="data_loader_out")
# Cast / normalize images if the images are uint8 png / jpeg.
# TODO(weich, mlehr) batch is not an Example instance in eager mode.
# Disable this for eager mode until this issue has been fixed.
if isinstance(batch, TransformedExample):
example = batch.example
else:
example = batch
if example.instances[FEATURE_CAMERA].images.dtype == tf.uint8:
_normalize_images(example)
# Apply transform matrices stored with a TransformedExample. Transformations
# are delayed up until after tf.data processing so that they can be performed on
# the GPU.
examples = batch() if isinstance(batch, TransformedExample) else batch
# (mlehr): Delayed pipeline needs to be applied after the transform processors.
# If the delayed pipeline is applied before, the output of it will be SequenceExample, so
# the transform processors would not be applied.
processed_examples = delayed_pipeline(examples)
return processed_examples
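# Minimal construction sketch (illustrative, not part of the original module): wire a
# SyntheticDataSource into a DataLoader and build its output. The synthetic source and
# test fixture mirror the ones used by the unit tests and are assumptions of this sketch.
def _example_build_data_loader(batch_size_per_gpu=2):
    """Illustrative only; returns the batch structure produced by the loader."""
    from nvidia_tao_tf1.blocks.multi_source_loader.sources.synthetic_data_source import (
        SyntheticDataSource,
    )
    from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures
    source = SyntheticDataSource(
        preprocessing=[],
        example_count=4,
        template=test_fixtures.make_example_3d(12, 24),
    )
    loader = DataLoader(
        data_sources=[source],
        augmentation_pipeline=[],
        batch_size_per_gpu=batch_size_per_gpu,
        shuffle=False,
        repeat=False,
        sampling=None,
    )
    loader.set_shard()  # Required before len(loader), loader.steps, or loader().
    # In graph mode, initialize the iterator collected under "iterator_init" before
    # fetching the returned batch in a session.
    return loader()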
class DataLoaderYOLOv3(DataLoader):
"""Customized DataLoader for YOLO v3."""
def call(self):
"""Produce examples with input features (such as images) and labels.
Returns:
examples (Example / SequenceExample): Example structure containing tf.Tensor that have
had augmentation applied to them.
"""
if self._pipeline_dtype:
image_loading_dtype = tf.dtypes.as_dtype(self._pipeline_dtype)
else:
image_loading_dtype = _pick_largest_image_dtype(self._data_sources).name
# TODO (vkallioniemi): refactor to move batching to sources.
# See [performance guideline](https://www.tensorflow.org/performance/datasets_performance)
# before making changes here.
core_count = multiprocessing.cpu_count()
        # Roughly tuned on pathnet, 8 GPUs, against 469735 images, roughly half of which were png
        # and half were fp16.
        # Previously this field was configurable; we discovered that using 1 IO thread was never
        # better, and it is strictly much worse on AVDC.
# TODO(@williamz): When multi-node is used, base shard count and cpu count on the local
# node.
io_threads = max(2 * (core_count // self._shard_count), 1)
if self._serial_augmentation:
compute_threads = 1
else:
compute_threads = core_count // self._shard_count
# For a typical batch of 32 images sized 604x960x3, the memory requirements are:
# 1 buffered batch: 32 * 3MB ~= 100MB.
buffered_batches = tf.data.experimental.AUTOTUNE
logger.info(
"number of cpus: %d, io threads: %d, compute threads: %d, buffered batches: %d",
core_count,
io_threads,
compute_threads,
buffered_batches,
)
logger.info(
"total dataset size %d, number of sources: %d, batch size per gpu: %d, steps: %d",
len(self),
len(self._data_sources),
self.batch_size_per_gpu,
self.steps,
)
# TODO(@williamz): Break this method up into smaller functional pieces.
datasets = []
weights = []
for source in self._data_sources:
dataset = source()
if not source:
logger.warning("skipping empty datasource")
continue
# TODO(vkallioniemi): Remove this if statement once all sources have been changed
# to parse examples by default.
if source.parse_example is not None:
dataset = dataset.apply(source.parse_example)
logger.info(
"shuffle: %s - shard %d of %d",
self._shuffle,
self._shard_index,
self._shard_count,
)
# Apply temporal batching and other global preprocessing.
# NOTE: This needs to be performed before sharding because operations such as
# temporal batching require examples to be chunked together.
for processor in self._preprocessing:
                # If the source itself handles temporal batching (like sqlite), we do not apply
                # the temporal batcher here.
if source.supports_temporal_batching():
if isinstance(processor, processors.TemporalBatcher):
continue
dataset = dataset.apply(processor)
# Evenly distribute records from each dataset to each GPU.
if self._shard_count != 1:
if not source.supports_sharding() and not self._pseudo_sharding:
dataset = dataset.shard(self._shard_count, self._shard_index)
if self._shuffle:
if not source.supports_shuffling():
dataset = dataset.apply(
tf.data.experimental.shuffle_and_repeat(
buffer_size=min(len(source), MAX_SHUFFLE_BUFFER), count=None
)
)
elif self._repeat:
# NOTE (@williamz): this line seems to imply the tf.data.Dataset object
# produced by the source handles the repeat() call in a way that does not
# result in the order being the same at every iteration over the entire dataset.
# This needs to be investigated properly for the sqlite data source.
dataset = dataset.repeat()
elif self._repeat:
dataset = dataset.repeat()
# Combine pipelines so that we do not lose information when affine
# transforms get applied at the end of a pipeline.
# The processors that are not transform processors need to be applied later.
# They require the images to be loaded.
delayed_processors = []
transform_processors = []
if isinstance(source.preprocessing, list):
# Source processors to apply specified as a list.
pipeline = processors.Pipeline(source.preprocessing)
else:
pipeline = source.preprocessing
if isinstance(self._augmentation_pipeline, list):
# Processors to apply specified as a list.
for processor in self._augmentation_pipeline:
if not isinstance(processor, processors.TransformProcessor):
delayed_processors.append(processor)
else:
transform_processors.append(processor)
augmentation_pipeline = processors.Pipeline(transform_processors)
pipeline += augmentation_pipeline
else:
pipeline += self._augmentation_pipeline
if pipeline:
dataset = dataset.map(pipeline, num_parallel_calls=tf.data.experimental.AUTOTUNE)
datasets.append(dataset)
weights.append(source.sample_ratio)
if self._sampling is not None:
total = sum(weights)
weights = [weight / total for weight in weights]
logger.info("sampling %d datasets with weights:", len(datasets))
for index, weight in enumerate(weights):
logger.info("source: %d weight: %f", index, weight)
combined = tf.data.experimental.sample_from_datasets(
datasets, weights=weights
)
else:
combined = datasets[0]
for dataset in datasets[1:]:
combined = combined.concatenate(dataset)
# Note: increasing parallelism will increase memory usage.
combined = combined.map(
processors.AssetLoader(output_dtype=image_loading_dtype),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
if delayed_processors:
delayed_pipeline = processors.Pipeline(delayed_processors)
else:
delayed_pipeline = processors.Pipeline([])
# Drop remainder since some downstream users rely on known batch size, specifically
# weighted_binary_crossentropy.
# TODO(ehall): Fix loss function to be able to handle variable sized batches.
# TODO(vkallioniemi): consider batching per dataset instead.
combined = combined.batch(self._batch_size_per_gpu, drop_remainder=True)
# Buffer more data here to pipeline CPU-GPU processing.
combined = combined.prefetch(buffered_batches)
if tf.executing_eagerly():
batch = combined
else:
            # Take one item from the dataloader and reuse it to serve the rest of the pipeline.
# This feature is used to benchmark the training pipeline with all data in memory.
# !!! Do not enable it in production.
if os.environ.get("MODULUS_DATALOADER_BYPASS_LOADING"):
combined = combined.take(1)
combined = combined.cache()
combined = combined.repeat()
iterator = tf.compat.v1.data.make_initializable_iterator(combined)
# So it can be easily initialized downstream.
tf.compat.v1.add_to_collection("iterator_init", iterator.initializer)
batch = iterator.get_next(name="data_loader_out")
# Apply transform matrices stored with a TransformedExample. Transformations
# are delayed up until after tf.data processing so that they can be performed on
# the GPU.
examples = batch() if isinstance(batch, TransformedExample) else batch
# (mlehr): Delayed pipeline needs to be applied after the transform processors.
# If the delayed pipeline is applied before, the output of it will be SequenceExample, so
# the transform processors would not be applied.
processed_examples = delayed_pipeline(examples)
return processed_examples
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests DataFormat class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from nvidia_tao_tf1.blocks.multi_source_loader.data_format import (
CHANNELS_FIRST,
CHANNELS_LAST,
DataFormat,
)
def test_creation_fails_with_invalid_data_format():
with pytest.raises(ValueError) as e:
DataFormat("upside_down")
assert "Unrecognized data_format 'upside_down'." in str(e.value)
def test_equality():
assert DataFormat("channels_first") == DataFormat("channels_first")
assert DataFormat("channels_last") == DataFormat("channels_last")
def test_inequality():
assert DataFormat("channels_first") != DataFormat("channels_last")
assert DataFormat("channels_last") != DataFormat("channels_first")
def test_stringify():
assert "channels_first" == str(DataFormat("channels_first"))
def test_tensor_axis_4d_channels_first():
axis = DataFormat("channels_first").axis_4d
assert axis.batch == 0
assert axis.channel == 1
assert axis.row == 2
assert axis.column == 3
def test_tensor_axis_4d_channels_last():
axis = DataFormat("channels_last").axis_4d
assert axis.batch == 0
assert axis.row == 1
assert axis.column == 2
assert axis.channel == 3
def test_tensor_axis_5d_channels_first():
axis = DataFormat("channels_first").axis_5d
assert axis.batch == 0
assert axis.time == 1
assert axis.channel == 2
assert axis.row == 3
assert axis.column == 4
def test_tensor_axis_5d_channels_last():
axis = DataFormat("channels_last").axis_5d
assert axis.batch == 0
assert axis.time == 1
assert axis.row == 2
assert axis.column == 3
assert axis.channel == 4
def test_convert_shape_4d():
shape = [64, 3, 200, 200]
identity_convert = CHANNELS_FIRST.convert_shape(shape, CHANNELS_FIRST)
assert shape == identity_convert
to_channels_last = CHANNELS_FIRST.convert_shape(shape, CHANNELS_LAST)
assert to_channels_last == [64, 200, 200, 3]
def test_convert_shape_5d():
shape = [64, 6, 3, 200, 200]
identity_convert = CHANNELS_FIRST.convert_shape(shape, CHANNELS_FIRST)
assert shape == identity_convert
to_channels_last = CHANNELS_FIRST.convert_shape(shape, CHANNELS_LAST)
assert to_channels_last == [64, 6, 200, 200, 3]
def test_convert_last_to_first():
shape = (None, None, 300, 300, 64)
identity_convert = CHANNELS_LAST.convert_shape(shape, CHANNELS_LAST)
assert shape == identity_convert
to_channels_first = CHANNELS_LAST.convert_shape(shape, CHANNELS_FIRST)
assert to_channels_first == (None, None, 64, 300, 300)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/data_format_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processors for transforming and augmenting data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.blocks.multi_source_loader.data_format import (
CHANNELS_FIRST,
CHANNELS_LAST,
DataFormat,
TensorAxis4D,
TensorAxis5D,
)
from nvidia_tao_tf1.blocks.multi_source_loader.data_loader import DataLoader
from nvidia_tao_tf1.blocks.multi_source_loader.frame_shape import FrameShape
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
empty_polygon_label,
Example,
FEATURE_CAMERA,
LABEL_MAP,
PolygonLabel,
)
__all__ = (
"CHANNELS_FIRST",
"CHANNELS_LAST",
"DataFormat",
"DataLoader",
"empty_polygon_label",
"Example",
"FEATURE_CAMERA",
"FrameShape",
"LABEL_MAP",
"PolygonLabel",
"TensorAxis4D",
"TensorAxis5D",
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A type safer wrapper for handling Keras impicitly defined data_format enums."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from functools import lru_cache
from nvidia_tao_tf1.core.coreobject import TAOObject, save_args
"""
Structure for storing 4D dimension name and index mappings.
4D images are used as batches for non-temporal/non-sequence models. To access e.g. the colors
of a 'channels_first' image tensor you would:
    axis = DataFormat('channels_first').axis_4d
colors = image[:, axis.channel]
Args:
batch (int): Index of batch/image/example dimension. This dimension spans [0, batch size).
row (int): Index of row dimension. This dimension spans [0, image height).
column (int): Index of column dimension. This dimension spans [0, image width).
channel (int): Index of channel dimension. This dimension spans [0, image colors channels).
"""
TensorAxis4D = namedtuple("TensorAxis4D", ["batch", "row", "column", "channel"])
"""
Structure for storing 5D dimension name and index mappings.
5D images are used as batches for temporal/sequence models. To access e.g. the colors
of a 'channels_first' image tensor you would:
axis = DataFormat('channels_first').axis_5d
colors = image[:, axis.channel]
Args:
batch (int): Index of batch/image/example dimension. This dimension spans [0, batch size).
time (int): Index of time dimension. This dimension spans [0, time steps).
row (int): Index of row dimension. This dimension spans [0, image height).
column (int): Index of column dimension. This dimension spans [0, image width).
channel (int): Index of channel dimension. This dimension spans [0, image colors channels).
"""
TensorAxis5D = namedtuple("TensorAxis5D", ["batch", "time", "row", "column", "channel"])
class DataFormat(TAOObject):
"""
    A wrapper class for handling Keras' implicitly defined data_format enums.
Images are represented as 3D tensors in TF. Historically, NVIDIA libraries like CuDNN were
optimized to work on representations that store color information in the first
    dimension (CHANNELS_FIRST). TensorFlow and other libraries oftentimes store the color
    information in the last dimension (CHANNELS_LAST).
"""
CHANNELS_FIRST = "channels_first"
CHANNELS_LAST = "channels_last"
VALID_FORMATS = [CHANNELS_FIRST, CHANNELS_LAST]
@save_args
def __init__(self, data_format):
"""Construct DataFormat object.
Args:
data_format (str): Either 'channels_first' or 'channels_last' (i.e. NCHW vs NHWC).
Raises:
exception (ValueError): when invalid data_format is provided.
"""
super(DataFormat, self).__init__()
if data_format not in self.VALID_FORMATS:
raise ValueError("Unrecognized data_format '{}'.".format(data_format))
self._data_format = data_format
def __str__(self):
"""Return string representation compatible with Keras.
Returns:
data_format (str): String representation of this object.
"""
return self._data_format
def __eq__(self, other):
"""Compare two data format objecst for equality.
Returns:
equal (bool): True if objects represent the same value.
"""
return self._data_format == other._data_format
def convert_shape(self, shape, target_format):
"""Converts a shape tuple from one data format to another.
Args:
shape (list, tuple): The shape in this data format.
target_format (DataFormat): The desired data format.
Returns:
shape (list, tuple): The shape in the target format.
"""
if self._data_format == target_format._data_format:
return shape
assert 4 <= len(shape) <= 5
converted = [shape[0], shape[1], 0, 0]
if len(shape) == 4:
src_axis = self.axis_4d
dest_axis = target_format.axis_4d
else:
converted.append(0)
src_axis = self.axis_5d
dest_axis = target_format.axis_5d
converted[dest_axis.channel] = shape[src_axis.channel]
converted[dest_axis.row] = shape[src_axis.row]
converted[dest_axis.column] = shape[src_axis.column]
if isinstance(shape, tuple):
converted = tuple(converted)
return converted
@property
@lru_cache(maxsize=None)
def axis_4d(self):
"""Return data format dependent axes of 4D tensors.
Returns:
(TensorAxis4D): Axes of tensors for this data format.
"""
if self._data_format == self.CHANNELS_FIRST:
return TensorAxis4D(batch=0, channel=1, row=2, column=3)
return TensorAxis4D(batch=0, row=1, column=2, channel=3)
@property
@lru_cache(maxsize=None)
def axis_5d(self):
"""Return data format dependent axes of 5D tensors.
Returns:
(TensorAxis5D): Axes of tensors for this data format.
"""
if self._data_format == self.CHANNELS_FIRST:
return TensorAxis5D(batch=0, time=1, channel=2, row=3, column=4)
return TensorAxis5D(batch=0, time=1, row=2, column=3, channel=4)
def __hash__(self):
"""Get the hash for this data format."""
return hash(self._data_format)
CHANNELS_FIRST = DataFormat(DataFormat.CHANNELS_FIRST)
CHANNELS_LAST = DataFormat(DataFormat.CHANNELS_LAST)
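# Minimal usage sketch (illustrative, not part of the original module): look up axis
# indices for a layout and convert a shape between the two layouts.
def _example_data_format_usage():
    """Illustrative only; returns the NHWC version of an NCHW shape."""
    axis = CHANNELS_FIRST.axis_4d
    assert (axis.batch, axis.channel, axis.row, axis.column) == (0, 1, 2, 3)
    # NCHW -> NHWC: [64, 3, 200, 200] becomes [64, 200, 200, 3].
    return CHANNELS_FIRST.convert_shape([64, 3, 200, 200], CHANNELS_LAST)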
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/data_format.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader benchmark suite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import logging
import os
import sqlite3
import tempfile
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader import processors
from nvidia_tao_tf1.blocks.multi_source_loader.data_loader import DataLoader
from nvidia_tao_tf1.blocks.multi_source_loader.sources.sqlite_data_source import (
SqliteDataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.sources.synthetic_data_source import (
SyntheticDataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures
def _generate_fp16_image(
export_path, sequence_id, camera_name, frame_number, height, width
):
"""Create files filled with zeros that look like exported fp16 images."""
FP16_BYTES_PER_CHANNEL = 2
size = height * width * 3 * FP16_BYTES_PER_CHANNEL
image = np.zeros(size, dtype=np.int8)
dir_path = os.path.join(export_path, sequence_id, camera_name)
try:
os.makedirs(dir_path)
except OSError as exception:
if exception.errno == errno.EEXIST and os.path.isdir(dir_path):
pass
else:
raise
path = os.path.join(dir_path, "{}.fp16".format(frame_number))
image.tofile(path)
def _fetch_frames_and_sequence(sqlite_path):
connection = sqlite3.connect(sqlite_path)
cursor = connection.cursor()
sequence_frames = cursor.execute(
"SELECT frame_number, session_uuid "
"FROM frames fr JOIN sequences seq ON fr.sequence_id=seq.id;"
)
return sequence_frames.fetchall()
class DataLoaderBenchmark(tf.test.Benchmark):
"""Data loader benchmark suite."""
SQLITE_PATH = (
"./moduluspy/modulus/dataloader/testdata/lane-assignment-RR-KPI_mini.sqlite"
)
ITERATIONS = 100
TRACE = False
MEMORY_USAGE = False
def _create_synthetic_dataset(self, image_width, image_height, batch_size):
"""
Build a synthetic datasource without any preprocessing/augmentation.
        This synthetic data source is an in-memory dataset without any further processing;
        the actual I/O on the dataset is bypassed.
Args:
image_width (int): Image width to generate image.
image_height (int): Image height to generate image.
batch_size (int): Batch size.
Return:
(Example): A 3D example fetched from a dataset.
"""
# Create a synthetic datasource with 1 example and use dataset.repeat()
# to make it repeat forever.
data_source = SyntheticDataSource(
preprocessing=[processors.Crop(left=0, top=0, right=960, bottom=504)],
example_count=1,
template=test_fixtures.make_example_3d(image_height, image_width),
)
data_loader = DataLoader(
data_sources=[data_source],
augmentation_pipeline=[],
batch_size=batch_size,
preprocessing=[],
)
dataset = data_loader()
dataset = dataset.repeat()
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
return iterator.get_next()
def _create_sqlite_dataset(self, batch_size):
"""
Build a sqlite datasource without any preprocessing/augmentation.
Args:
batch_size (int): Batch size.
Return:
(Example): A 3D example fetched from a dataset.
"""
self.export_path = tempfile.mkdtemp()
frames_and_sequences = _fetch_frames_and_sequence(self.SQLITE_PATH)
for frame_number, sequence_uuid in frames_and_sequences:
_generate_fp16_image(
export_path=self.export_path,
sequence_id=sequence_uuid,
camera_name="video_B0_FC_60/rgb_half_dwsoftisp_v0.52b",
frame_number=str(frame_number),
height=604,
width=960,
)
source = SqliteDataSource(
sqlite_path=self.SQLITE_PATH,
image_dir=self.export_path,
export_format=SqliteDataSource.FORMAT_RGB_HALF_DWSOFTISP,
preprocessing=[processors.Crop(left=0, top=0, right=960, bottom=504)],
)
data_loader = DataLoader(
data_sources=[source],
augmentation_pipeline=[],
batch_size=batch_size,
preprocessing=[],
)
dataset = data_loader()
dataset = dataset.repeat()
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
return iterator.get_next()
def benchmark_synthetic_dataset_24batch(self):
"""Benchmark 604x960 image with batch_size 24 for 100 iterations."""
with tf.compat.v1.Session() as sess:
run_tensor = self._create_synthetic_dataset(960, 604, 24)
self.run_op_benchmark(
sess=sess,
op_or_tensor=run_tensor,
min_iters=self.ITERATIONS,
store_trace=self.TRACE,
store_memory_usage=self.MEMORY_USAGE,
)
def benchmark_sqlite_dataset_24batch(self):
"""Benchmark 604x960 image with batch_size 24 for 100 iterations."""
with tf.compat.v1.Session() as sess:
run_tensor = self._create_sqlite_dataset(24)
self.run_op_benchmark(
sess=sess,
op_or_tensor=run_tensor,
min_iters=self.ITERATIONS,
store_trace=self.TRACE,
store_memory_usage=self.MEMORY_USAGE,
)
def benchmark_sqlite_dataset_32batch(self):
"""Benchmark 604x960 image with batch_size 32 for 100 iterations."""
with tf.compat.v1.Session() as sess:
run_tensor = self._create_sqlite_dataset(32)
self.run_op_benchmark(
sess=sess,
op_or_tensor=run_tensor,
min_iters=self.ITERATIONS,
store_trace=self.TRACE,
store_memory_usage=self.MEMORY_USAGE,
)
def benchmark_synthetic_dataset_none_32batch(self):
"""Benchmark 604x960 image with batch_size 32 for 100 iterations."""
sess = tf.compat.v1.Session()
run_tensor = self._create_synthetic_dataset(960, 604, 32)
self.run_op_benchmark(
sess=sess,
op_or_tensor=run_tensor,
min_iters=self.ITERATIONS,
store_trace=self.TRACE,
store_memory_usage=self.MEMORY_USAGE,
)
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
tf.test.main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/data_loader_benchmark.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for shape of the images/frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
FrameShape = namedtuple("FrameShape", ["height", "width", "channels"])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/frame_shape.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DataLoader in eager mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from parameterized import parameterized
import tensorflow as tf
tf.compat.v1.enable_eager_execution() # noqa
from nvidia_tao_tf1.blocks.multi_source_loader import processors
from nvidia_tao_tf1.blocks.multi_source_loader.data_loader import DataLoader
from nvidia_tao_tf1.blocks.multi_source_loader.sources.synthetic_data_source import (
SyntheticDataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import FEATURE_CAMERA
from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures
class DataLoaderTest(tf.test.TestCase):
"""Test the DataLoader in eager mode."""
@parameterized.expand(
[[1, 1, 1], [2, 1, 2], [3, 1, 3], [4, 2, 1], [5, 2, 2], [6, 2, 3]]
)
def test_iterates_all_examples(self, batch_count, batch_size, window_size):
example_count = batch_count * batch_size * window_size
dataloader = DataLoader(
data_sources=[
SyntheticDataSource(
preprocessing=[],
example_count=example_count,
template=test_fixtures.make_example_3d(12, 24),
)
],
augmentation_pipeline=[],
batch_size=batch_size,
shuffle=False,
repeat=False,
sampling=None,
preprocessing=[processors.TemporalBatcher(size=window_size)],
)
dataloader.set_shard()
batches_read = 0
for example in dataloader():
batches_read += 1
self.assertEqual(
(batch_size, window_size, 3, 12, 24),
example.instances[FEATURE_CAMERA].images.shape,
)
self.assertEqual(batch_count, batches_read)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/data_loader_eager_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DataLoader object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
from mock import patch
from parameterized import parameterized
import pytest
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader import processors
from nvidia_tao_tf1.blocks.multi_source_loader.data_loader import (
_normalize_images,
_pick_largest_image_dtype,
DataLoader,
)
from nvidia_tao_tf1.blocks.multi_source_loader.sources.data_source import (
DataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.sources.synthetic_data_source import (
SyntheticDataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
FEATURE_CAMERA,
Images2D,
SequenceExample,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object, register_tao_function
class FP16DataSource(DataSource):
"""Mocked DataSource with image dtype tf.float16."""
def call(self):
pass
@property
def image_dtype(self):
return tf.float16
def __len__(self):
return 1
class FP32DataSource(DataSource):
"""Mocked DataSource with image dtype tf.float32."""
def call(self):
pass
@property
def image_dtype(self):
return tf.float32
def __len__(self):
return 1
class UINT8DataSource(DataSource):
"""Mocked DataSource with image dtype tf.uint8."""
def call(self):
pass
@property
def image_dtype(self):
return tf.uint8
def __len__(self):
return 1
def return_args(args):
return args
@register_tao_function
def _synthetic_data_source_template_fn():
return test_fixtures.make_example_3d(height=17, width=19, label_name="LABEL_A")
@pytest.fixture(scope="function")
@patch(
"nvidia_tao_tf1.blocks.multi_source_loader."
"sources.tfrecords_data_source.TFRecordsDataSource"
)
def _data_sources(mocked_tfrecords_source):
# we assume this data_source has 128 frames
mocked_tfrecords_source.__len__.return_value = 128
mocked_tfrecords_source.get_image_properties.return_value = 980, 604
data_sources = []
data_sources.append(mocked_tfrecords_source)
return data_sources
def _datasource_mock(width=980, height=604):
return mock.Mock(__len__=lambda _: 42, get_image_properties=lambda: (width, height))
def test_defaults_to_single_shard(_data_sources):
dataloader = DataLoader(
_data_sources, augmentation_pipeline=[], batch_size_per_gpu=32
)
dataloader.set_shard()
assert dataloader.batch_size_per_gpu == 32
assert dataloader.steps == 4
def test_two_shards_halves_batch_size(_data_sources):
dataloader = DataLoader(
data_sources=_data_sources,
augmentation_pipeline=[],
batch_size=32,
shuffle=True,
)
dataloader.set_shard(shard_count=2, shard_index=0)
assert dataloader.batch_size_per_gpu == 16
def test_two_shards_same_steps(_data_sources):
dataloader = DataLoader(
data_sources=_data_sources,
augmentation_pipeline=[],
batch_size=32,
shuffle=True,
)
dataloader.set_shard(shard_count=2, shard_index=0)
assert dataloader.steps == 4
def test_shard_count_less_than_one_raises(_data_sources):
dataloader = DataLoader(
data_sources=_data_sources,
augmentation_pipeline=[],
batch_size=32,
shuffle=True,
)
with pytest.raises(ValueError):
dataloader.set_shard(shard_count=0, shard_index=0)
def test_shard_index_outside_of_range_raises(_data_sources):
dataloader = DataLoader(
data_sources=_data_sources,
augmentation_pipeline=[],
batch_size=32,
shuffle=True,
)
with pytest.raises(ValueError):
dataloader.set_shard(shard_count=2, shard_index=2)
class DataLoaderTest(tf.test.TestCase):
"""Test the DataLoader."""
def test_no_data_source(self):
"""Test that, if no sources are provided, the correct exception is raised."""
with pytest.raises(ValueError):
DataLoader(data_sources=[], augmentation_pipeline=[], batch_size=1)
@parameterized.expand([(1, [1.0]), (2, [0.5, 0.5])])
@patch(
"nvidia_tao_tf1.blocks.multi_source_loader."
"data_loader.tf.data.experimental.sample_from_datasets"
)
def test_data_source_combination_uniform_sampling(
self, number_of_sources, expected_weights, mocked_sample_from_datasets
):
"""Test that the DataLoader interleaves the sources correctly with uniform sampling."""
sources = []
for _ in range(number_of_sources):
sources.append(
SyntheticDataSource(
preprocessing=[],
example_count=1,
template=test_fixtures.make_example_3d(12, 24),
)
)
dataloader = DataLoader(
data_sources=sources, augmentation_pipeline=[], batch_size=1
)
dataloader.set_shard()
assert len(dataloader) == number_of_sources
dataloader()
mocked_sample_from_datasets.assert_called_once()
call_args = mocked_sample_from_datasets.call_args
weights = call_args[1]["weights"]
assert weights == expected_weights
@patch(
"nvidia_tao_tf1.blocks.multi_source_loader."
"data_loader.tf.data.experimental.sample_from_datasets"
)
def test_data_source_combination_specific_sampling(
self, mocked_sample_from_datasets
):
"""Test that the DataLoader interleaves the sources correctly with given sampling ratios."""
ratios = [0.2, 0.8]
sources = [
SyntheticDataSource(
preprocessing=[],
example_count=1,
template=test_fixtures.make_example_3d(12, 24),
sample_ratio=ratio,
)
for ratio in ratios
]
dataloader = DataLoader(
data_sources=sources, augmentation_pipeline=[], batch_size=1
)
dataloader.set_shard()
assert dataloader._temporal_size is None
assert len(dataloader) == len(ratios)
dataloader()
mocked_sample_from_datasets.assert_called_once()
call_args = mocked_sample_from_datasets.call_args
weights = call_args[1]["weights"]
assert weights == [0.2, 0.8]
@parameterized.expand(
[("proportional", [1.0 / 6.0, 1.0 / 3.0, 0.5]), ("uniform", [1.0 / 3.0] * 3)]
)
@patch(
"nvidia_tao_tf1.blocks.multi_source_loader."
"data_loader.tf.data.experimental.sample_from_datasets"
)
def test_data_source_default_sampling_modes(
self, sampling, expected_ratios, mocked_sample_from_datasets
):
"""Test that the DataLoader sets up expected sampling ratios for supported modes."""
sources = [
SyntheticDataSource(
preprocessing=[],
example_count=i + 1,
template=test_fixtures.make_example_3d(12, 24),
)
for i in range(3)
]
dataloader = DataLoader(
data_sources=sources,
augmentation_pipeline=[],
batch_size=1,
sampling=sampling,
)
dataloader.set_shard()
dataloader()
mocked_sample_from_datasets.assert_called_once()
call_args = mocked_sample_from_datasets.call_args
weights = call_args[1]["weights"]
self.assertAllClose(weights, expected_ratios)
def test_mixing_transform_and_nontransform_processors_does_not_fail(self):
"""Test that the dataloader works with source and dataloader processors."""
window_size = 1
batch_size = 1
source = SyntheticDataSource(
preprocessing=[processors.Scale(height=6, width=12)],
example_count=1,
template=test_fixtures.make_example_3d(12, 24),
)
dataloader = DataLoader(
data_sources=[source],
augmentation_pipeline=[
processors.RandomZoom(),
processors.RandomGaussianBlur(probability=1.0),
],
preprocessing=[processors.TemporalBatcher(size=window_size)],
batch_size=batch_size,
)
dataloader.set_shard()
with self.cached_session() as sess:
batch = dataloader()
# First, initialize the iterator.
sess.run(tf.compat.v1.get_collection("iterator_init"))
example = sess.run(batch)
assert (batch_size, window_size, 3, 6, 12) == example.instances[
FEATURE_CAMERA
].images.shape
@parameterized.expand([(1, [2]), (3, [1, 2, 3])])
@patch(
"nvidia_tao_tf1.blocks.multi_source_loader.data_loader.tf.data.Dataset.concatenate"
)
def test_data_source_concatenation_calls(
self, number_of_sources, example_count, mocked_concatenate
):
mocked_concatenate.side_effect = return_args
sources = []
for idx in range(number_of_sources):
sources.append(
SyntheticDataSource(
preprocessing=[],
example_count=example_count[idx],
template=test_fixtures.make_example_3d(12, 24),
)
)
dataloader = DataLoader(
data_sources=sources,
augmentation_pipeline=[],
batch_size=1,
shuffle=False,
repeat=False,
sampling=None,
)
dataloader.set_shard()
assert len(dataloader) == sum(
[len(source) for source in dataloader._data_sources]
)
dataloader()
assert mocked_concatenate.call_count == number_of_sources - 1
def test_data_source_concatenation_number_examples(self):
sources = []
number_of_sources = 3
example_count = [1, 2, 3]
for idx in range(number_of_sources):
sources.append(
SyntheticDataSource(
preprocessing=[],
example_count=example_count[idx],
template=test_fixtures.make_example_3d(12, 24),
)
)
dataloader = DataLoader(
data_sources=sources,
augmentation_pipeline=[],
batch_size=1,
shuffle=False,
repeat=False,
sampling=None,
)
dataloader.set_shard()
number_of_examples = 0
with self.cached_session() as sess:
batch = dataloader()
# First, initialize the iterator.
sess.run(tf.compat.v1.get_collection("iterator_init"))
while True:
try:
sess.run(batch)
number_of_examples += 1
except tf.errors.OutOfRangeError:
break
assert number_of_examples == len(dataloader)
@parameterized.expand([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
def test_produces_5d_examples(self, batch_size, window_size):
source = SyntheticDataSource(
preprocessing=[],
example_count=window_size * 2,
template=test_fixtures.make_example_3d(12, 24),
)
dataloader = DataLoader(
data_sources=[source],
augmentation_pipeline=[],
batch_size=batch_size,
preprocessing=[processors.TemporalBatcher(size=window_size)],
)
dataloader.set_shard()
assert dataloader._temporal_size == window_size
assert len(dataloader) == 2
with self.cached_session() as sess:
batch = dataloader()
# First, initialize the iterator.
sess.run(tf.compat.v1.get_collection("iterator_init"))
example = sess.run(batch)
assert (batch_size, window_size, 3, 12, 24) == example.instances[
FEATURE_CAMERA
].images.shape
def test_invalid_dataset_size(self):
window_size = 6
batch_size = 2
source = SyntheticDataSource(
preprocessing=[],
example_count=window_size - 1,
template=test_fixtures.make_example_3d(12, 24),
)
dataloader = DataLoader(
data_sources=[source],
augmentation_pipeline=[],
batch_size=batch_size,
preprocessing=[processors.TemporalBatcher(size=window_size)],
)
with self.assertRaises(ValueError):
dataloader.set_shard()
def test_produces_4d_examples_when_window_size_not_set(self):
source = SyntheticDataSource(
preprocessing=[],
example_count=1,
template=test_fixtures.make_example_3d(12, 24),
)
dataloader = DataLoader(
data_sources=[source], augmentation_pipeline=[], batch_size=1
)
dataloader.set_shard()
example = dataloader()
assert (1, 3, 12, 24) == example.instances[FEATURE_CAMERA].images.shape
def test_normalize_images(self):
images = tf.zeros([2, 2, 2], dtype=tf.uint8)
example = SequenceExample(
instances={
FEATURE_CAMERA: Images2D(images=images, canvas_shape=mock.Mock())
},
labels=mock.Mock(),
)
_normalize_images(example)
assert example.instances[FEATURE_CAMERA].images.dtype == tf.float32
with self.cached_session() as sess:
normalized_images = sess.run(example.instances[FEATURE_CAMERA].images)
self.assertAllInRange(normalized_images, 0.0, 1.0)
@parameterized.expand(
[
[[UINT8DataSource()], tf.uint8],
[[FP16DataSource()], tf.float16],
[[FP32DataSource()], tf.float32],
[[UINT8DataSource(), UINT8DataSource(), UINT8DataSource()], tf.uint8],
[[FP16DataSource(), FP16DataSource(), FP16DataSource()], tf.float16],
[[FP32DataSource(), FP32DataSource(), FP32DataSource()], tf.float32],
[[FP16DataSource(), FP32DataSource(), UINT8DataSource()], tf.float32],
[[UINT8DataSource(), UINT8DataSource(), FP16DataSource()], tf.float16],
[[FP32DataSource(), FP16DataSource(), FP32DataSource()], tf.float32],
]
)
def test_pick_image_dtype(self, data_sources, expected_dtype):
dtype = _pick_largest_image_dtype(data_sources)
assert expected_dtype == dtype
def test_stringifies(self):
source = _datasource_mock()
dataloader = DataLoader(
data_sources=[source], augmentation_pipeline=[], batch_size=1
)
dataloader.set_shard()
self.assertEqual(
" - examples: 42\n"
" - steps: 42\n"
" - batch size per gpu: 1\n"
" - shuffle: True\n"
" - shard count: 1\n"
" - shard index: 0\n"
" - pseudo-sharding: False\n"
" - serial augmentation: False\n"
" - sources:\n"
" Source 0: Mock\n",
str(dataloader),
)
def test_normalizes_image_sizes(self):
mocks = [_datasource_mock(64, 504), _datasource_mock(128, 42)]
# The data loader should query each data source, and gather their image
# dimensions. Finally, it should tell each what the max dimensions are
# across all data sources.
dataloader = DataLoader(
data_sources=mocks, augmentation_pipeline=[], batch_size=1
)
dataloader.set_shard()
for ds_mock in mocks:
ds_mock.set_image_properties.assert_called_once_with(128, 504)
@mock.patch("nvidia_tao_tf1.blocks.multi_source_loader.data_loader.print")
def test_summarizes_to_stdout(self, mocked_print):
dataloader = DataLoader(
data_sources=[_datasource_mock()], augmentation_pipeline=[], batch_size=1
)
dataloader.set_shard()
dataloader.summary()
mocked_print.assert_has_calls(
[
mock.call(" - examples: 42"),
mock.call(" - steps: 42"),
mock.call(" - batch size per gpu: 1"),
mock.call(" - shuffle: True"),
mock.call(" - shard count: 1"),
mock.call(" - shard index: 0"),
mock.call(" - pseudo-sharding: False"),
mock.call(" - serial augmentation: False"),
mock.call(" - sources:"),
]
)
def test_summarizes_to_print_fn(self):
dataloader = DataLoader(
data_sources=[_datasource_mock()], augmentation_pipeline=[], batch_size=1
)
dataloader.set_shard()
print_fn = mock.Mock()
dataloader.summary(print_fn=print_fn)
print_fn.assert_has_calls(
[
mock.call(" - examples: 42"),
mock.call(" - steps: 42"),
mock.call(" - batch size per gpu: 1"),
mock.call(" - shuffle: True"),
mock.call(" - shard count: 1"),
mock.call(" - shard index: 0"),
mock.call(" - pseudo-sharding: False"),
mock.call(" - serial augmentation: False"),
mock.call(" - sources:"),
]
)
@parameterized.expand([(7, True), (13, False), (16, True)])
def test_serialization_roundtrip(self, batch_size, shuffle):
_data_sources = []
for _ in range(2):
_data_sources.append(
SyntheticDataSource(
example_count=3, # some prime number.
template_fn=_synthetic_data_source_template_fn,
tracker_dict=dict(),
)
)
dataloader = DataLoader(
data_sources=_data_sources,
augmentation_pipeline=[],
batch_size=batch_size,
shuffle=shuffle,
)
dataloader.set_shard()
s = dataloader.serialize()
dataloader_2 = deserialize_tao_object(s)
dataloader_2.set_shard()
# Check some properties / attributes are as expected.
assert dataloader_2.steps == dataloader.steps
assert dataloader_2.batch_size_per_gpu == dataloader.batch_size_per_gpu
assert len(dataloader_2) == len(dataloader)
@mock.patch(
"nvidia_tao_tf1.blocks.multi_source_loader.data_loader.DataLoader._configure_sources"
)
def test_set_shard(self, mocked_configure_sources):
sources = []
for _ in range(2):
sources.append(
SyntheticDataSource(
example_count=16, # some even number.
template_fn=_synthetic_data_source_template_fn,
tracker_dict=dict(),
)
)
dataloader = DataLoader(
data_sources=sources, augmentation_pipeline=[], batch_size=16
)
dataloader.set_shard(shard_count=4, shard_index=3)
assert dataloader._shard_index == 3
assert dataloader._shard_count == 4
assert dataloader._batch_size_per_gpu == 4
assert dataloader.steps == 2
assert len(dataloader) == 32
mocked_configure_sources.assert_called_once()
def test_set_shard_adjust_on_batch_size(self):
sources = []
batch_size = 16
shard_count = 8
for _ in range(2):
sources.append(
SyntheticDataSource(
example_count=16, # some even number.
template_fn=_synthetic_data_source_template_fn,
tracker_dict=dict(),
)
)
dataloader = DataLoader(
data_sources=sources, augmentation_pipeline=[], batch_size=batch_size
)
# When batch_size is set,
# dataloader.batch_size = batch_size / shard_count.
dataloader.set_shard(shard_count=shard_count, shard_index=0)
assert batch_size / shard_count == dataloader.batch_size_per_gpu
def test_set_shard_adjust_on_batch_size_per_gpu(self):
sources = []
batch_size_per_gpu = 4
for _ in range(2):
sources.append(
SyntheticDataSource(
example_count=16, # some even number.
template_fn=_synthetic_data_source_template_fn,
tracker_dict=dict(),
)
)
dataloader = DataLoader(
data_sources=sources,
augmentation_pipeline=[],
batch_size_per_gpu=batch_size_per_gpu,
)
# When batch_size_per_gpu is set,
# dataloader.batch_size = batch_size_per_gpu.
dataloader.set_shard(shard_count=8, shard_index=0)
assert batch_size_per_gpu == dataloader.batch_size_per_gpu
def test_raises_error_calling_to_len_before_set_shard(self):
dataloader = DataLoader(
data_sources=[_datasource_mock()], augmentation_pipeline=[], batch_size=1
)
with self.assertRaises(ValueError):
len(dataloader)
def test_raises_error_calling_steps_before_set_shard(self):
dataloader = DataLoader(
data_sources=[_datasource_mock()], augmentation_pipeline=[], batch_size=1
)
with self.assertRaises(ValueError):
dataloader.steps
@parameterized.expand(
[
(-1, -1, "Exactly one of batch_size and batch_size_per_gpu must be set."),
(1, 1, "Exactly one of batch_size and batch_size_per_gpu must be set."),
(1, -1, "Exactly one of batch_size and batch_size_per_gpu must be set."),
(-1, 1, "Exactly one of batch_size and batch_size_per_gpu must be set."),
(
None,
None,
"Exactly one of batch_size and batch_size_per_gpu must be set.",
),
(None, -1, ".* must be positive."),
(None, -1, ".* must be positive."),
(None, 0, ".* must be positive."),
(0, None, ".* must be positive."),
]
)
def test_raise_error_with_illegal_batch_size(
self, batch_size, batch_size_per_gpu, message
):
with self.assertRaisesRegexp(ValueError, message):
DataLoader(
data_sources=[_datasource_mock()],
augmentation_pipeline=[],
batch_size=batch_size,
batch_size_per_gpu=batch_size_per_gpu,
)
def test_raise_error_when_neither_batch_size_is_set(self):
message = "Exactly one of batch_size and batch_size_per_gpu must be set."
with self.assertRaisesRegexp(ValueError, message):
DataLoader(data_sources=[_datasource_mock()], augmentation_pipeline=[])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/data_loader_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for test fixtures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from parameterized import parameterized
import tensorflow as tf
import nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures as fixtures
class TestFixturesTest(tf.test.TestCase):
def test_tags(self):
tags = fixtures.make_tags([[[[1]]]])
with self.session() as sess:
tags = sess.run(tags)
self.assertAllEqual([[0, 0, 0, 0]], tags.indices)
self.assertAllEqual([1], tags.values)
@parameterized.expand(
[
[
[[1]],
5,
10,
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 2, 0],
[0, 0, 0, 2, 1],
[0, 0, 0, 3, 0],
[0, 0, 0, 3, 1],
[0, 0, 0, 4, 0],
[0, 0, 0, 4, 1],
],
[42],
[[0, 0, 0, 0]],
[7],
[[0, 0, 0, 0]],
],
[
[[1, 1]],
2,
8,
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 1],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 1, 0, 1, 1],
],
[42, 42],
[[0, 0, 0, 0], [0, 1, 0, 0]],
[7, 7],
[[0, 0, 0, 0], [0, 1, 0, 0]],
],
]
)
def test_polygon2d_labels(
self,
shapes_per_frame,
coordinates_per_polygon,
expected_total_coordinates,
expected_coordinate_indices,
expected_classes,
expected_class_indices,
expected_attributes,
expected_attribute_indices,
):
with self.session() as sess:
actual = sess.run(
fixtures.make_polygon2d_label(
shapes_per_frame, [42], [7], 128, 248, coordinates_per_polygon
)
)
self.assertAllEqual(
expected_coordinate_indices, actual.vertices.coordinates.indices
)
self.assertAllEqual(
expected_total_coordinates, len(actual.vertices.coordinates.values)
)
self.assertAllEqual(expected_class_indices, actual.classes.indices)
self.assertAllEqual(expected_classes, actual.classes.values)
self.assertAllEqual(expected_attribute_indices, actual.attributes.indices)
self.assertAllEqual(expected_attributes, actual.attributes.values)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/test_fixtures_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for Coordinates2D."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from parameterized import parameterized
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
test_fixtures as fixtures,
)
from nvidia_tao_tf1.core.types import Transform
class Coordinates2DTest(tf.test.TestCase):
@parameterized.expand(
[
[[[1]]],
[[[1, 2]]],
[[[1], [2]]],
[[[1, 2], [2, 3]]],
[[[1, 2, 3], [4, 5, 6]]],
]
)
def test_apply_succeeds(self, shapes_per_frame):
with self.session() as sess:
example_count = len(shapes_per_frame)
coordinates = fixtures.make_coordinates2d(
shapes_per_frame=shapes_per_frame, height=604, width=960
)
transform = fixtures.make_identity_transform(
example_count,
604,
960,
timesteps=coordinates.canvas_shape.width.shape[1],
)
transformed = coordinates.apply(transform)
self.assertAllClose(
sess.run(coordinates.coordinates), sess.run(transformed.coordinates)
)
self.assertAllClose(
sess.run(coordinates.canvas_shape), sess.run(transformed.canvas_shape)
)
@parameterized.expand(
[
[[[1]], [(5, 10)]], # 1 example with 1 frame and 1 shape:
# translate x by 5, y by 10 pixels
[
[[1, 2]],
[(5, 10)],
], # 1 example with 2 frames. The first frame has 1 shape, the second has 2.
# translate x by 5, y by 10 pixels
[[[1], [2]], [(5, 10), (7, 42)]],
[[[1, 2], [2, 3]], [(10, 5), (42, 7)]],
[
[[1, 2, 3], [4, 5, 6]],
[(3, 6), (7, 14)],
], # 2 examples each with 3 frames. Translate
# first example x by 3 and y by 6 pixels
# second example x by 7 and y by 14 pixels
]
)
def test_applies_translations_per_example(
self, shapes_per_frame, per_example_translations
):
with self.session() as sess:
example_count = len(shapes_per_frame)
translate_count = len(per_example_translations)
assert example_count == translate_count
height = 604
width = 960
transform = fixtures.make_identity_transform(example_count, height, width)
def make_translate_matrix(x, y):
return tf.constant([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [x, y, 1.0]])
transform = Transform(
canvas_shape=fixtures.make_canvas2d(example_count, height, width),
color_transform_matrix=tf.stack(
[tf.eye(4, dtype=tf.float32) for _ in range(example_count)]
),
spatial_transform_matrix=tf.stack(
[make_translate_matrix(x, y) for x, y in per_example_translations]
),
)
coordinates = fixtures.make_coordinates2d(
shapes_per_frame=shapes_per_frame, height=height, width=width
)
transformed = coordinates.apply(transform)
offset_accumulator = 0
for example_index, example_frame_shapes in enumerate(shapes_per_frame):
coordinate_count = 3 * sum(example_frame_shapes)
start_offset = offset_accumulator
end_offset = offset_accumulator + coordinate_count
x_translate = per_example_translations[example_index][0]
y_translate = per_example_translations[example_index][1]
x_expected = (
tf.reshape(coordinates.coordinates.values, (-1, 2))[
start_offset:end_offset, 0
]
- x_translate
)
x_transformed = tf.reshape(transformed.coordinates.values, (-1, 2))[
start_offset:end_offset, 0
]
self.assertAllClose(
sess.run(x_expected), sess.run(x_transformed), rtol=1e-3, atol=1e-3
)
y_expected = (
tf.reshape(coordinates.coordinates.values, (-1, 2))[
start_offset:end_offset, 1
]
- y_translate
)
y_transformed = tf.reshape(transformed.coordinates.values, (-1, 2))[
start_offset:end_offset, 1
]
self.assertAllClose(
sess.run(y_expected), sess.run(y_transformed), rtol=1e-3, atol=1e-3
)
offset_accumulator += coordinate_count
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/coordinates2d_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TransformedExample examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from nvidia_tao_tf1.blocks.multi_source_loader.types.transformed_example import (
TransformedExample,
)
from nvidia_tao_tf1.core.types import Example
FEATURE_CAMERA = "CAMERA"
FEATURE_SESSION = "SESSION"
LABEL_MAP = "MAP"
LABEL_OBJECT = "OBJECT"
LABEL_FREESPACE_REGRESSION = LABEL_MAP
# TODO(ehall): Add a configurable mapping from POLYGON->LABEL_FREESPACE_SEGMENTATION
LABEL_FREESPACE_SEGMENTATION = LABEL_MAP
# TODO(ehall): Add a configurable mapping from POLYGON->LABEL_PANOPTIC_SEGMENTATION
LABEL_PANOPTIC_SEGMENTATION = LABEL_MAP
# TODO(vkallioniemi): Add a configurable mapping from POLYLINE->LABEL_PATH
LABEL_PATH = LABEL_MAP
LABEL_DEPTH_FREESPACE = "DEPTH_FREESPACE"
LABEL_DEPTH_DENSE_MAP = "DEPTH_DENSE_MAP"
# This class and the associated TransformedExample class borrow ideas from the "lift-lower" pattern
# presented in this podcast:
# https://lispcast.com/a-cool-functional-programming-pattern-do-you-know-what-to-call-it/
#
# The basic idea of the pattern is to temporarily transform values to a richer type that makes
# manipulating them easier:
# 1. Lift the type to a richer type (TransformedExample) to make transformations easier.
# 2. Perform transforms on the TransformedExample
# 3. Lower the type back to the original type when apply gets called.
class SequenceExample(namedtuple("SequenceExample", Example._fields)):
"""SequenceExample - a collection of features and labels passed to the model + loss.
Args:
instances (dict): Data that will be transformed to features that are input to a model.
labels (dict): Labels are transformed to targets that work as inputs to a model loss.
"""
def transform(self, transformation):
"""Delayed transform of this value."""
return TransformedExample(example=self, transformation=transformation)
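# Illustrative usage sketch (added for clarity; not part of the original module).
# It shows the "lift" half of the lift-lower pattern described above, relying only
# on what this module defines: SequenceExample holds instances/labels keyed by the
# constants above, and transform() wraps the example in a TransformedExample that
# records a pending transformation. The mock stand-ins mirror how the unit tests
# build examples and are assumptions for illustration only.
def _example_lift_lower_sketch():
    """Sketch: lift a SequenceExample into a TransformedExample."""
    from unittest import mock

    example = SequenceExample(
        instances={FEATURE_CAMERA: mock.Mock()}, labels={LABEL_MAP: mock.Mock()}
    )
    # "Lift": nothing is computed yet; the transformation is only recorded so that
    # it can be applied ("lowered") later when the example is materialized.
    lifted = example.transform(transformation=mock.Mock())
    assert isinstance(lifted, TransformedExample)
    return lifted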
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/sequence_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test functions that process front / back markers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.process_markers import (
FRONT_BACK_TOLERANCE,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.process_markers import (
map_markers_to_orientations,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.process_markers import (
map_orientation_to_markers,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.process_markers import (
SIDE_ONLY_TOLERANCE,
)
TEST_INVALID_ORIENTATION = -123.0
# First, define cases that can be used for both translation functions.
_COMMON_CASES = [
# front_markers, back_markers, ref_angle, clockwise, expected_orientations
# Front + right side.
([0.5, 0.4], [0.0] * 2, 0.0, False, [-0.75 * np.pi, -0.8 * np.pi]),
# Front + left side.
(
[0.2, 0.5, 0.71],
[1.0] * 3,
0.0,
False,
[0.6 * np.pi, 0.75 * np.pi, 0.855 * np.pi],
),
# Back + right side.
([1.0] * 2, [0.4, 0.7], 0.0, False, [-0.3 * np.pi, -0.15 * np.pi]),
# Back + left side.
([0.0] * 3, [0.1, 0.3, 0.8], 0.0, False, [0.05 * np.pi, 0.15 * np.pi, 0.4 * np.pi]),
# Left only.
([0.0] * 5, [1.0] * 5, 0.0, False, [np.pi / 2.0] * 5),
# Right only.
([1.0] * 5, [0.0] * 5, 0.0, False, [-np.pi / 2.0] * 5),
# Legacy (rumpy) test cases.
# Left only.
([0.0] * 5, [1.0] * 5, -np.pi / 2.0, True, [np.pi] * 5),
# Back + left side.
(
[0.0] * 3,
[0.1, 0.3, 0.8],
-np.pi / 2.0,
True,
[-0.55 * np.pi, -0.65 * np.pi, -0.9 * np.pi],
),
# Back + right side.
([1.0] * 2, [0.4, 0.7], -np.pi / 2.0, True, [-0.2 * np.pi, -0.35 * np.pi]),
# Front + right side.
([0.5, 0.4], [0.0] * 2, -np.pi / 2.0, True, [0.25 * np.pi, 0.3 * np.pi]),
# Front + left side.
(
[0.2, 0.5, 0.71],
[1.0] * 3,
-np.pi / 2.0,
True,
[0.9 * np.pi, 0.75 * np.pi, 0.645 * np.pi],
),
]
@pytest.mark.parametrize(
"front_markers,back_markers,ref_angle,clockwise,expected_orientations",
[
# Illegal / nonsensical marker values map to TEST_INVALID_ORIENTATION.
(
[-0.5, 5.0, 0.5],
[0.0, -5.0, -1.0],
0.0,
False,
[TEST_INVALID_ORIENTATION] * 3,
),
# Same test but with values that should be rounded.
([0.005, 0.991], [0.009, 0.996], 0.0, False, [TEST_INVALID_ORIENTATION] * 2),
# Invalid and valid values are both present. Valid value is front + right.
([0.0, 0.4], [0.0] * 2, 0.0, False, [TEST_INVALID_ORIENTATION, -0.8 * np.pi]),
# Back only: 0.0.
([-1.0] * 2, [0.0, 1.0], 0.0, False, [0.0] * 2),
# Front only: pi (180 degrees).
([0.0, 1.0], [-1.0] * 2, 0.0, False, [np.pi] * 2),
# Back only, rumpy legacy system.
([-1.0] * 2, [0.0, 1.0], -np.pi / 2.0, True, [-np.pi / 2.0] * 2),
]
+ _COMMON_CASES,
)
def test_markers_to_orientations(
front_markers, back_markers, ref_angle, clockwise, expected_orientations
):
"""Test that the translation from (front_marker, back_marker) to orientations is correct."""
# First, get tensors for the markers.
front_markers_tensor, back_markers_tensor = map(
tf.constant, [front_markers, back_markers]
)
# Then, translate to orientation values.
orientations_tensor = map_markers_to_orientations(
front_markers=front_markers_tensor,
invalid_orientation=TEST_INVALID_ORIENTATION,
back_markers=back_markers_tensor,
ref_angle=ref_angle,
clockwise=clockwise,
)
# Evaluate.
with tf.compat.v1.Session() as session:
orientations = session.run(orientations_tensor)
# Check values are correct.
assert np.allclose(orientations, expected_orientations)
@pytest.mark.parametrize(
"expected_front_markers,expected_back_markers,ref_angle,clockwise,orientations",
[
# Front only. Here, we are slightly below the tolerance value away from the ref_angle.
(
[-1.0] * 2,
[0.0] * 2,
0.0,
True,
[-0.99 * FRONT_BACK_TOLERANCE, 0.99 * FRONT_BACK_TOLERANCE],
),
# Same, but anticlockwise.
(
[-1.0] * 2,
[0.0] * 2,
0.0,
False,
[-0.99 * FRONT_BACK_TOLERANCE, 0.99 * FRONT_BACK_TOLERANCE],
),
# Same, but with Rumpy scheme.
(
[-1.0] * 2,
[0.0] * 2,
-np.pi / 2.0,
True,
[
-np.pi / 2.0 - 0.99 * FRONT_BACK_TOLERANCE,
-np.pi / 2.0 + 0.99 * FRONT_BACK_TOLERANCE,
],
),
# Back only.
(
[0.0] * 2,
[-1.0] * 2,
0.0,
True,
[np.pi - 0.99 * FRONT_BACK_TOLERANCE, np.pi + 0.99 * FRONT_BACK_TOLERANCE],
),
# Same, but anticlockwise.
(
[0.0] * 2,
[-1.0] * 2,
0.0,
False,
[np.pi - 0.99 * FRONT_BACK_TOLERANCE, -np.pi + 0.99 * FRONT_BACK_TOLERANCE],
),
# Same, but with Rumpy scheme.
(
[0.0] * 2,
[-1.0] * 2,
-np.pi / 2.0,
True,
[
np.pi / 2.0 - 0.99 * FRONT_BACK_TOLERANCE,
np.pi / 2.0 + 0.99 * FRONT_BACK_TOLERANCE,
],
),
# Left only.
(
[0.0] * 2,
[1.0] * 2,
0.0,
False,
[
np.pi / 2.0 - 0.99 * SIDE_ONLY_TOLERANCE,
np.pi / 2.0 + 0.99 * SIDE_ONLY_TOLERANCE,
],
),
# Same, but clockwise.
(
[0.0] * 2,
[1.0] * 2,
0.0,
True,
[
-np.pi / 2.0 - 0.99 * SIDE_ONLY_TOLERANCE,
-np.pi / 2.0 + 0.99 * SIDE_ONLY_TOLERANCE,
],
),
# Same, but with Rumpy scheme.
(
[0.0] * 2,
[1.0] * 2,
-np.pi / 2.0,
True,
[np.pi - 0.99 * SIDE_ONLY_TOLERANCE, -np.pi + 0.99 * SIDE_ONLY_TOLERANCE],
),
# Right only.
(
[1.0] * 2,
[0.0] * 2,
0.0,
False,
[
-np.pi / 2.0 - 0.99 * SIDE_ONLY_TOLERANCE,
-np.pi / 2.0 + 0.99 * SIDE_ONLY_TOLERANCE,
],
),
# Same, but clockwise.
(
[1.0] * 2,
[0.0] * 2,
0.0,
True,
[
np.pi / 2.0 - 0.99 * SIDE_ONLY_TOLERANCE,
np.pi / 2.0 + 0.99 * SIDE_ONLY_TOLERANCE,
],
),
# Same, but with Rumpy scheme.
(
[1.0] * 2,
[0.0] * 2,
-np.pi / 2.0,
True,
[-0.99 * SIDE_ONLY_TOLERANCE, 0.99 * SIDE_ONLY_TOLERANCE],
),
]
+ _COMMON_CASES,
)
def test_map_orientation_to_markers(
expected_front_markers, expected_back_markers, ref_angle, clockwise, orientations
):
"""Test that map_orientation_to_markers translates orientation back to markers correctly."""
front_markers, back_markers = zip(
*map(
lambda x: map_orientation_to_markers(x, ref_angle, clockwise), orientations
)
)
assert np.allclose(front_markers, expected_front_markers)
assert np.allclose(back_markers, expected_back_markers)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/process_markers_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test of tf.io.decode_image function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from PIL import Image
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.images2d_reference import (
decode_image
)
def test_jpeg_1_as_1():
"""test of decoding grayscale jpeg image as grayscale."""
# generate a random image
f, frame_path = tempfile.mkstemp(suffix=".jpg")
os.close(f)
image_data = np.random.randint(0, 256, (10, 10), dtype=np.uint8)
random_image = Image.fromarray(image_data)
random_image.save(frame_path)
# read the image
image_data = np.array(Image.open(frame_path))
data = tf.io.read_file(frame_path)
image = decode_image(data, channels=1, extension=".jpg")
with tf.Session() as sess:
image_data_tf = sess.run(image)[..., 0]
assert np.array_equal(image_data, image_data_tf)
def test_png_1_as_1():
"""test of decoding grayscale png image as grayscale."""
# generate a random image
f, frame_path = tempfile.mkstemp(suffix=".png")
os.close(f)
image_data = np.random.randint(0, 256, (10, 10), dtype=np.uint8)
random_image = Image.fromarray(image_data)
random_image.save(frame_path)
# read the image
image_data = np.array(Image.open(frame_path))
data = tf.io.read_file(frame_path)
image = decode_image(data, channels=1, extension=".png")
with tf.Session() as sess:
image_data_tf = sess.run(image)[..., 0]
assert np.array_equal(image_data, image_data_tf)
def test_jpeg_3_as_1():
"""test of decoding RGB jpeg image as grayscale."""
# generate a random RGB image
f, frame_path = tempfile.mkstemp(suffix=".jpg")
os.close(f)
image_data = np.random.randint(0, 256, (10, 10, 3), dtype=np.uint8)
random_image = Image.fromarray(image_data)
random_image.save(frame_path)
# read the image as grayscale
data = tf.io.read_file(frame_path)
image = decode_image(data, channels=1, extension=".jpg")
with tf.Session() as sess:
image_data_tf = sess.run(image)
# due to implementation differences, we cannot match tf and PIL
# for the converted grayscale images
# so just check the channel number is 1
assert image_data_tf.shape[-1] == 1
# and that the output grayscale image is not simply the first channel
# of the RGB image
image_data = np.array(Image.open(frame_path))
assert not np.array_equal(image_data_tf[..., 0], image_data[..., 0])
def test_png_3_as_1():
"""test of decoding RGB png image as grayscale."""
# generate a random RGB image
f, frame_path = tempfile.mkstemp(suffix=".png")
os.close(f)
image_data = np.random.randint(0, 256, (10, 10, 3), dtype=np.uint8)
random_image = Image.fromarray(image_data)
random_image.save(frame_path)
# read the image as grayscale
data = tf.io.read_file(frame_path)
image = decode_image(data, channels=1, extension=".png")
with tf.Session() as sess:
image_data_tf = sess.run(image)
# due to implementation differences, we cannot match tf and PIL
# for the converted grayscale images
# so just check the channel number is 1
assert image_data_tf.shape[-1] == 1
# and that the output grayscale image is not simply the first channel
# of the RGB image
image_data = np.array(Image.open(frame_path))
assert not np.array_equal(image_data_tf[..., 0], image_data[..., 0])
def test_jpeg_3_as_3():
"""test of decoding RGB jpeg image as RGB image."""
# generate a random RGB image
f, frame_path = tempfile.mkstemp(suffix=".jpg")
os.close(f)
image_data = np.random.randint(0, 256, (10, 10, 3), dtype=np.uint8)
random_image = Image.fromarray(image_data)
random_image.save(frame_path)
# read the image
data = tf.io.read_file(frame_path)
image = decode_image(data, channels=3, extension=".jpg")
with tf.Session() as sess:
image_data_tf = sess.run(image)
# check for the image shape
# we cannot guarantee that the tf and PIL output images
# are identical due to implementation differences between them.
assert image_data_tf.shape == (10, 10, 3)
def test_png_3_as_3():
"""test of decoding RGB png image as RGB image."""
# generate a random RGB image
f, frame_path = tempfile.mkstemp(suffix=".png")
os.close(f)
image_data = np.random.randint(0, 256, (10, 10, 3), dtype=np.uint8)
random_image = Image.fromarray(image_data)
random_image.save(frame_path)
# read the image
data = tf.io.read_file(frame_path)
image = decode_image(data, channels=3, extension=".png")
with tf.Session() as sess:
image_data_tf = sess.run(image)
# check for the image shape
# we cannot guarantee that the tf and PIL output images
# are identical due to implementation differences between them.
assert image_data_tf.shape == (10, 10, 3)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/decode_image_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Polygon label."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
Coordinates2D,
Coordinates2DWithCounts,
)
_Polygon2DLabel = namedtuple("Polygon2DLabel", ["vertices", "classes", "attributes"])
class Polygon2DLabel(_Polygon2DLabel):
"""
Polygon label.
vertices (Coordinates2D): Coordinates for vertices that make up this polygon.
classes (tf.SparseTensor): Classes associated with each polygon.
attributes (tf.SparseTensor): Attributes associated with each polygon.
"""
def compress_frame_dimension(self):
"""
Combines frame and batch dimensions in the vertices, classes and attributes.
Compresses vertices from 5D sparse (B, F, S, V, C) tensor to a 4D (B', S, V, C) where
B=Batch, F=Frame, S=Shape, V=Vertex, C=Coordinate(always dim 2) and B'=new Batch(BxF)
Compresses classes from 4D tensor of shape [B, F, S, C] to 3D tensor of shape [B', S, C]
where B=Batch, F=Frame, S=Shape, C=Class(currently always dim 1) and B'=new Batch(BxF)
Compresses attributes from 4D tensor of shape [B, F, S, A] to 3D tensor of shape [B', S, A]
where B=Batch, F=Frame, S=Shape, A=Attribute and B'=new Batch(BxF)
Allows label to be used in processors that don't handle sequence data like the rasterizer.
Returns
New Polygon2DLabel with the vertices, classes and attributes tensors reshaped
to combine batch and sequence dimensions.
"""
vertices = self.vertices
classes = self.classes
attributes = self.attributes
coordinates = vertices.coordinates
reshaped_coordinates = tf.sparse.reshape(
coordinates,
[
-1,
coordinates.dense_shape[-3],
coordinates.dense_shape[-2],
coordinates.dense_shape[-1],
],
)
reshaped_classes = tf.sparse.reshape(
classes,
tf.convert_to_tensor(
value=[-1, classes.dense_shape[-2], classes.dense_shape[-1]],
dtype=tf.int64,
),
)
# Check for empty attributes and handle appropriately.
new_attribute_shape = tf.cond(
pred=tf.equal(tf.reduce_sum(input_tensor=attributes.dense_shape), 0),
true_fn=lambda: tf.convert_to_tensor(value=[0, 0, 0], dtype=tf.int64),
false_fn=lambda: tf.convert_to_tensor(
value=[-1, attributes.dense_shape[-2], attributes.dense_shape[-1]],
dtype=tf.int64,
),
)
reshaped_attributes = tf.sparse.reshape(attributes, new_attribute_shape)
return Polygon2DLabel(
vertices=Coordinates2D(
coordinates=reshaped_coordinates, canvas_shape=vertices.canvas_shape
),
classes=reshaped_classes,
attributes=reshaped_attributes,
)
def slice_to_last_frame(self):
"""
Slices out all of the frames except for the last.
The input tensor is expected to have shape (B, F, S, V, C), and the output
tensor will be (B, F, S, V, C) with F=1.
Returns:
New Polygon2DLabel with the vertices, classes, and attributes tensors sliced
to only contain the final frame.
"""
vertices = self.vertices
classes = self.classes
attributes = self.attributes
coordinates = vertices.coordinates
vert_counts = vertices.vertices_count
slice_coords = _sparse_slice(coordinates, 1, -1)
slice_counts = _sparse_slice(vert_counts, 1, -1)
slice_classes = _sparse_slice(classes, 1, -1)
slice_attributes = _sparse_slice(attributes, 1, -1)
return Polygon2DLabel(
vertices=Coordinates2DWithCounts(
coordinates=slice_coords,
canvas_shape=vertices.canvas_shape,
vertices_count=slice_counts,
),
classes=slice_classes,
attributes=slice_attributes,
)
def _sparse_slice(tensor, axis, index):
shape = tensor.dense_shape
def _get_slice_prms(shape):
slice_start = np.zeros_like(shape)
slice_size = np.copy(shape)
idx = index
if idx < 0:
idx = shape[axis] + idx
slice_start[axis] = idx # noqa pylint: disable = E1137
slice_size[axis] = 1
return slice_start, slice_size
slice_start, slice_size = tf.compat.v1.py_func(
_get_slice_prms, [shape], (shape.dtype, shape.dtype), stateful=False
)
sliced_tensor = tf.sparse.slice(tensor, slice_start, slice_size)
return sliced_tensor
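# Illustrative sketch (added for clarity; not part of the original module). It shows
# the effect of compress_frame_dimension() on the label shapes. The sketch assumes
# that the test fixture make_polygon2d_label returns a Polygon2DLabel whose vertex
# coordinates are a 5D sparse tensor (batch, frame, shape, vertex, coordinate), as
# exercised by the fixture tests in this package; the literal arguments mirror those
# tests and are otherwise arbitrary.
def _example_compress_frame_dimension():
    """Sketch: fold the batch and frame dimensions of a Polygon2DLabel."""
    from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures

    # 1 example with 2 frames: the first frame has 1 polygon, the second has 2.
    label = test_fixtures.make_polygon2d_label([[1, 2]], [42], [7], 128, 248, 3)
    compressed = label.compress_frame_dimension()
    with tf.compat.v1.Session() as sess:
        before, after = sess.run(
            [
                label.vertices.coordinates.dense_shape,
                compressed.vertices.coordinates.dense_shape,
            ]
        )
    # `before` has 5 entries (B, F, S, V, C); `after` has 4 (B*F, S, V, C).
    return before, after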
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/polygon2d_label.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Legacy LaneNet types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import tensorflow as tf
# TODO(vkallioniemi): These are only used from the old dataloader - delete once it gets deleted.
"""Value class for representing polygons.
Named tuples are compatible with TF datasets, conditionals and other control flow operations.
Based off of arguments passed to Maglev Rasterizer call here:
moduluspy/modulus/processors/processors.py:PolygonRasterizer#call
This structure allows for a batch of more than one image, but is often used with just one.
Fields:
polygons: a tensor of shape ``[P, 2]`` and type tf.float32. Each row contains the (x,y)
coordinates of a single vertex of a single polygon for a single image. The dimension P
is equal to the total number of vertices over all polygons.
vertices_per_polygon: a tensor of shape ``[P]`` and type tf.int32. The elements of the
tensor are the vertex counts for each polygon. Thus, the length of this list is equal to
the number of polygons we will draw (P), and if we were to sum all the values in this
list, the sum should equal the first dimension of the ``polygons`` list above. To get
the vertices for the ``n``th polygon it would be
``polygons[sum(vertices_per_polygon[0:n]):sum(vertices_per_polygon[0:n + 1])]``
class_ids_per_polygon: a tensor of shape ``[P]`` and type tf.int32
``vertices_per_polygon`` list above. Each list element is an ID representing
the class to which each polygon belongs.
polygons_per_image: A scalar or tensor of shape ``[I]`` and type tf.int32. The number of
polygons in an image. Scalar for a single image or tensor for multiple images.
Note this is slightly different from maglev, where it is `None` for a single image (the
default)
`attributes_per_polygon`: A tensor of shape ``[P]`` and type tf.int32. Legacy field, where
there is 0 or 1 attribute per polygon. This field is populated with the filtered list of
attributes from JSONConverter.
`attributes`: A tensor of shape ``[polygons_per_image]`` and type tf.int32. All the
attributes associated with the polygons in this image. Mapping to polygons is done using
the `attributes_per_polygon` field.
`attribute_count_per_polygon`: A tensor of shape ``[vertices_per_polygon]`` and type
tf.int32. This field is used to map attributes to a polygon. Similarly to `vertices`,
the ``n``th polygon will have ``attribute_count_per_polygon[n]`` attributes associated with it. The
specific attributes will be
``attributes[sum(attribute_count_per_polygon[0:n]):sum(attribute_count_per_polygon[0:n + 1])]``
"""
PolygonLabel = namedtuple(
"PolygonLabel",
[
"polygons",
"vertices_per_polygon",
"class_ids_per_polygon",
"attributes_per_polygon",
"polygons_per_image",
"attributes",
"attribute_count_per_polygon",
],
)
def empty_polygon_label():
"""Method to create an empty PolygonLabel object."""
return PolygonLabel(
polygons=tf.constant([[]], shape=[0, 2], dtype=tf.float32),
vertices_per_polygon=tf.constant([], shape=[1], dtype=tf.int32),
class_ids_per_polygon=tf.constant([], shape=[1], dtype=tf.int32),
attributes_per_polygon=tf.constant([], shape=[1], dtype=tf.int32),
polygons_per_image=tf.constant([0], shape=[1], dtype=tf.int32),
attributes=tf.constant([], shape=[1], dtype=tf.int32),
attribute_count_per_polygon=tf.constant([0], shape=[1], dtype=tf.int32),
)
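# Illustrative sketch (added for clarity; not part of the original module). It builds
# a PolygonLabel for a single image containing one triangle, following the field
# semantics documented above. The concrete class id (1) and attribute id (3) are
# made-up values for illustration only.
def _example_single_triangle_label():
    """Sketch: build a PolygonLabel holding one triangle in one image."""
    return PolygonLabel(
        # Three (x, y) vertices of one triangle; P (total vertex count) is 3.
        polygons=tf.constant([[0.0, 0.0], [10.0, 0.0], [5.0, 8.0]], dtype=tf.float32),
        # The single polygon owns all three vertices above.
        vertices_per_polygon=tf.constant([3], dtype=tf.int32),
        # One class id per polygon.
        class_ids_per_polygon=tf.constant([1], dtype=tf.int32),
        # Legacy field: at most one attribute per polygon.
        attributes_per_polygon=tf.constant([3], dtype=tf.int32),
        # One image containing one polygon.
        polygons_per_image=tf.constant([1], dtype=tf.int32),
        # All attributes present in the image.
        attributes=tf.constant([3], dtype=tf.int32),
        # The single polygon has one attribute mapped to it.
        attribute_count_per_polygon=tf.constant([1], dtype=tf.int32),
    )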
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/legacy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for Images2DReference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import shutil
import tempfile
import numpy as np
from parameterized import parameterized
from PIL import Image
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types import Canvas2D
from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures
from nvidia_tao_tf1.blocks.multi_source_loader.types.images2d_reference import (
Images2DReference,
)
def _generate_images(
image_type, export_path, sequence_id, camera_name, frame_numbers, height, width
):
"""Create files filled with random numbers that look like exported fp16/jpeg/png images."""
dir_path = os.path.join(export_path, sequence_id, camera_name)
os.makedirs(dir_path)
paths = []
for frame_number in frame_numbers:
path = os.path.join(dir_path, "{}.{}".format(frame_number, image_type))
if image_type == "fp16":
# For fp16, the value of each pixel lies between 0~1.
image = np.random.rand(3, height, width).astype(np.float16)
image.tofile(path)
else:
# For jpeg and png, the value of each pixel lies between 0~255.
image = np.random.randint(255, size=(height, width, 3), dtype=np.uint8)
image = np.ascontiguousarray(image)
Image.fromarray(image, "RGB").save(path)
paths.append(path)
return paths
class Images2DReferenceTest(tf.test.TestCase):
def setUp(self):
self.export_path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.export_path)
@parameterized.expand(
[
["jpeg", 1, 3, 240, 480, np.float16],
["jpeg", 2, 3, 604, 960, np.float32],
["jpeg", 3, 3, 240, 480, np.float16],
["jpeg", 3, 3, 240, 480, np.uint8],
["png", 1, 3, 240, 480, np.float16],
["png", 2, 3, 604, 960, np.float32],
["png", 3, 3, 240, 480, np.float16],
["png", 3, 3, 240, 480, np.uint8],
]
)
def test_loads_4d_examples(
self, image_type, batch_size, channel_count, height, width, output_dtype
):
paths = _generate_images(
image_type=image_type,
export_path=self.export_path,
sequence_id="4d_batch",
camera_name="FOV_60",
frame_numbers=[number for number in range(batch_size)],
height=height,
width=width,
)
extensions = ["." + image_type for _ in range(batch_size)]
# Sprinkle some extra pixels to test that images are padded to a common output shape.
output_height = height + random.randint(4, 8)
output_width = width + random.randint(2, 10)
shapes = test_fixtures.make_canvas2d(batch_size, output_height, output_width)
input_height = [height] * batch_size
input_width = [width] * batch_size
assets = Images2DReference(
path=tf.constant(paths),
extension=tf.constant(extensions),
canvas_shape=shapes,
input_height=input_height,
input_width=input_width,
)
loaded = assets.load(output_dtype=output_dtype)
assert loaded.images.dtype == output_dtype
assert [
batch_size,
channel_count,
output_height,
output_width,
] == loaded.images.shape
with self.cached_session() as session:
loaded = session.run(loaded)
assert loaded.images.dtype == output_dtype
assert (
batch_size,
channel_count,
output_height,
output_width,
) == loaded.images.shape
if output_dtype == np.uint8:
# pixel of png and jpeg with dtype uint8 is normalized between 0.0 and 255.0.
self.assertAllInRange(loaded.images, 0.0, 255.0)
else:
self.assertAllInRange(loaded.images, 0.0, 1.0)
@parameterized.expand(
[
["jpeg", 1, 1, 3, 240, 480, np.uint8],
["jpeg", 1, 2, 3, 604, 960, np.float16],
["jpeg", 1, 3, 3, 240, 480, np.float32],
["jpeg", 2, 1, 3, 240, 480, np.uint8],
["jpeg", 2, 2, 3, 604, 960, np.float16],
["jpeg", 2, 3, 3, 240, 480, np.float32],
["png", 1, 1, 3, 240, 480, np.uint8],
["png", 1, 2, 3, 604, 960, np.float16],
["png", 1, 3, 3, 240, 480, np.float32],
["png", 2, 1, 3, 240, 480, np.uint8],
["png", 2, 2, 3, 604, 960, np.float16],
["png", 2, 3, 3, 240, 480, np.float32],
]
)
def test_loads_5d_examples(
self,
image_type,
batch_size,
window_size,
channel_count,
height,
width,
output_dtype,
):
paths = []
extensions = []
shapes = []
# Sprinkle some extra pixels to test that images are padded to a common output shape.
output_height = height + random.randint(4, 8)
output_width = width + random.randint(2, 10)
for batch in range(batch_size):
paths.append(
_generate_images(
image_type=image_type,
export_path=self.export_path,
sequence_id="5d_batch_{}".format(batch),
camera_name="FOV_60",
frame_numbers=[number for number in range(window_size)],
height=height,
width=width,
)
)
extensions.append(["." + image_type for _ in range(window_size)])
shapes.append(
test_fixtures.make_canvas2d(window_size, output_height, output_width)
)
input_height = [[height] * window_size] * batch_size
input_width = [[width] * window_size] * batch_size
assets = Images2DReference(
path=tf.constant(paths),
extension=tf.constant(extensions),
canvas_shape=Canvas2D(
height=tf.stack([shape.height for shape in shapes]),
width=tf.stack([shape.width for shape in shapes]),
),
input_height=input_height,
input_width=input_width,
)
loaded = assets.load(output_dtype=output_dtype)
assert loaded.images.dtype == output_dtype
assert [
batch_size,
window_size,
channel_count,
output_height,
output_width,
] == loaded.images.shape
with self.cached_session() as session:
loaded = session.run(loaded)
assert loaded.images.dtype == output_dtype
assert (
batch_size,
window_size,
channel_count,
output_height,
output_width,
) == loaded.images.shape
if output_dtype == np.uint8:
# pixel of png and jpeg with dtype uint8 is normalized between 0.0 and 255.0.
self.assertAllInRange(loaded.images, 0.0, 255.0)
else:
self.assertAllInRange(loaded.images, 0.0, 1.0)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/images2d_reference_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for Bbox2DLabel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import mock
import numpy as np
from parameterized import parameterized
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
test_fixtures as fixtures,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import (
_get_begin_and_end_indices,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import _to_ltrb
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import (
augment_marker_labels,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import (
Bbox2DLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import (
filter_bbox_label_based_on_minimum_dims,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
Coordinates2D,
)
from modulus.processors.augment.spatial import get_random_spatial_transformation_matrix
from modulus.types import Canvas2D
from modulus.types import Transform
def _get_bbox_2d_label(shapes_per_frame, height=604, width=960):
"""
shapes_per_frame: Outer list is for examples, inner list is for frames.
"""
label_kwargs = dict()
label_kwargs["frame_id"] = [] # Bogus.
label_kwargs["vertices"] = fixtures.make_coordinates2d(
shapes_per_frame=shapes_per_frame,
height=height,
width=width,
coordinates_per_polygon=2,
)
# tags = [examples[frames[shapes[tags,]]]]
def get_tags(tag):
tags = [
[[[tag] * num_shapes] for num_shapes in example]
for example in shapes_per_frame
]
return fixtures.make_tags(tags)
label_kwargs["object_class"] = get_tags("van")
label_kwargs["occlusion"] = get_tags(0)
label_kwargs["truncation"] = get_tags(0.0)
label_kwargs["truncation_type"] = get_tags(0)
label_kwargs["is_cvip"] = get_tags(False)
label_kwargs["world_bbox_z"] = get_tags(-1.0)
label_kwargs["non_facing"] = get_tags(False)
label_kwargs["front"] = get_tags(-1.0)
label_kwargs["back"] = get_tags(-1.0)
label_kwargs["source_weight"] = get_tags(1.0)
return Bbox2DLabel(**label_kwargs)
class Bbox2DLabelTest(tf.test.TestCase):
"""Test Bbox2dLabel and helper functions."""
@parameterized.expand([[[[2, 3], [4]]]])
def test_apply_calls_coordinates_transform(self, shapes_per_frame):
"""Test that the .vertices are calling the apply() method of Coordinates2D."""
example_count = len(shapes_per_frame)
height, width = 604, 960
bbox_2d_label = _get_bbox_2d_label(shapes_per_frame, height, width)
transform = fixtures.make_identity_transform(example_count, height, width)
with mock.patch.object(
bbox_2d_label.vertices, "apply", side_effect=bbox_2d_label.vertices.apply
) as spied_vertices_apply:
bbox_2d_label.apply(transform)
spied_vertices_apply.assert_called_with(transform)
@parameterized.expand([[[[3, 2], [1]]], [[[4, 6], [0]]]])
def test_to_ltrb(self, shapes_per_frame):
"""Test that _to_ltrb() works as advertised."""
height, width = 604, 960
coordinates_2d = fixtures.make_coordinates2d(
shapes_per_frame=shapes_per_frame,
height=height,
width=width,
coordinates_per_polygon=2,
)
with self.session() as session:
ltrb_coordinates = session.run(_to_ltrb(coordinates_2d.coordinates.values))
xmin = ltrb_coordinates[::4]
xmax = ltrb_coordinates[2::4]
ymin = ltrb_coordinates[1::4]
ymax = ltrb_coordinates[3::4]
self.assertTrue((xmax >= xmin).all())
self.assertTrue((ymax >= ymin).all())
@parameterized.expand(
[
[
[[2, 3], [4, 5]], # shapes_per_frame
[0, 5], # expected_begin_indices
[5, 14], # expected_end_indices
range(2),
]
]
)
def test_get_begin_and_end_indices(
self,
shapes_per_frame,
expected_begin_indices,
expected_end_indices,
expected_indices_index,
):
"""Test _get_begin_and_end_indices."""
height, width = 604, 960
bbox_2d_label = _get_bbox_2d_label(shapes_per_frame, height, width)
# Use object_class as a tf.SparseTensor to test with.
sparse_tensor = bbox_2d_label.object_class
with self.session() as session:
begin_indices, end_indices, indices_index = session.run(
_get_begin_and_end_indices(sparse_tensor)
)
self.assertAllEqual(begin_indices, expected_begin_indices)
self.assertAllEqual(end_indices, expected_end_indices)
self.assertAllEqual(indices_index, expected_indices_index)
@parameterized.expand([[[[2, 3], [4]]]])
def test_apply_calls_to_ltrb(self, shapes_per_frame):
"""Test that the apply method calls _to_ltrb."""
example_count = len(shapes_per_frame)
height, width = 604, 960
bbox_2d_label = _get_bbox_2d_label(shapes_per_frame, height, width)
transform = fixtures.make_identity_transform(example_count, height, width)
with mock.patch(
"nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label._to_ltrb",
side_effect=_to_ltrb,
) as spied_to_ltrb:
bbox_2d_label.apply(transform)
spied_to_ltrb.assert_called_once()
@parameterized.expand([[[[3, 2], [1]], [0, 4]]])
def test_filter(self, shapes_per_frame, valid_indices):
"""Test that the filter method works as advertised."""
height, width = 604, 960
bbox_2d_label = _get_bbox_2d_label(shapes_per_frame, height, width)
# Convert test arg to appropriate format.
num_indices = sum(functools.reduce(lambda x, y: x + y, shapes_per_frame))
valid_indices_tensor = tf.constant(
[index in valid_indices for index in range(num_indices)]
)
filtered_label = bbox_2d_label.filter(valid_indices_tensor)
with self.session() as session:
original_output, filtered_output = session.run(
[bbox_2d_label, filtered_label]
)
num_values = len(valid_indices)
# Check that the output sizes are sane, and that their contents are as
# expected.
for target_feature_name in Bbox2DLabel.TARGET_FEATURES:
if target_feature_name == "vertices":
original_feature = original_output.vertices.coordinates.values.reshape(
(-1, 4)
)
filtered_feature = filtered_output.vertices.coordinates.values
filtered_feature = filtered_feature.reshape((-1, 4))
assert filtered_feature.size == num_values * 4
else:
original_feature = getattr(original_output, target_feature_name).values
filtered_feature = getattr(filtered_output, target_feature_name).values
assert filtered_feature.size == num_values
for filtered_index, original_index in enumerate(valid_indices):
self.assertAllEqual(
original_feature[original_index], filtered_feature[filtered_index]
)
@parameterized.expand(
[
(0.0, [0.1, 0.2, 0.3, -1.0], [0.1, 0.2, 0.3, -1.0]),
(1.0, [0.1, 0.2, 0.3, -1.0], [0.9, 0.8, 0.7, -1.0]),
]
)
def test_augment_marker_labels(
self, hflip_probability, marker_labels, expected_labels
):
"""Test that augment_marker_labels augments the marker values correctly."""
# Get an stm.
stm = get_random_spatial_transformation_matrix(
width=10,
height=11,
flip_lr_prob=hflip_probability,
# The below shouldn't affect the test, so it is ok that they are random.
translate_max_x=5,
translate_max_y=3,
zoom_ratio_min=0.7,
zoom_ratio_max=1.2,
)
# Get some orientation labels.
marker_labels_tensor = tf.constant(marker_labels)
augmented_marker_labels_tensor = augment_marker_labels(
marker_labels_tensor, stm
)
with self.session() as session:
augmented_marker_labels = session.run(augmented_marker_labels_tensor)
self.assertAllClose(augmented_marker_labels, expected_labels)
@parameterized.expand([[[[2, 3], [4]]]])
def test_apply_calls_augment_marker_labels(self, shapes_per_frame):
"""Test that the apply method calls augment_marker_labels the expected number of times."""
example_count = len(shapes_per_frame)
height, width = 604, 960
bbox_2d_label = _get_bbox_2d_label(shapes_per_frame, height, width)
transform = fixtures.make_identity_transform(example_count, height, width)
with mock.patch(
"nvidia_tao_tf1.blocks.multi_source_loader."
"types.bbox_2d_label.augment_marker_labels",
side_effect=augment_marker_labels,
) as spied_augment_marker_labels:
bbox_2d_label.apply(transform)
assert spied_augment_marker_labels.call_count == len(shapes_per_frame)
@parameterized.expand([([1.5, 0.8], [[3], [2, 1]])])
def test_augment_depth(self, zoom_factors, shapes_per_frame):
"""Test that depth values are augmented as expected.
Args:
zoom_factors (list): List of zoom factors (float). Each element is the zoom factor
to apply to the corresponding example.
shapes_per_frame (list of lists): As expected by the make_coordinates2d test fixture.
"""
# First, get random spatial augmentation matrices with the expected zoom factors.
height, width = 604, 960
transform = Transform(
canvas_shape=fixtures.make_canvas2d(
count=len(shapes_per_frame), height=height, width=width
),
color_transform_matrix=tf.stack(
[tf.eye(4, dtype=tf.float32) for _ in shapes_per_frame]
),
spatial_transform_matrix=tf.stack(
[
get_random_spatial_transformation_matrix(
width=width,
height=height,
flip_lr_prob=0.5,
translate_max_x=4,
translate_max_y=6,
zoom_ratio_min=zoom_factor,
zoom_ratio_max=zoom_factor,
)
for zoom_factor in zoom_factors
]
),
)
bbox_2d_label = _get_bbox_2d_label(shapes_per_frame, height, width)
transformed_label = bbox_2d_label.apply(transform)
with self.session() as session:
original_depth, output_depth = session.run(
[bbox_2d_label.world_bbox_z, transformed_label.world_bbox_z]
)
i = 0
for zoom_factor, shape_list in zip(zoom_factors, shapes_per_frame):
num_shapes = sum(shape_list)
for _ in range(num_shapes):
self.assertAllClose(
zoom_factor * original_depth.values[i], output_depth.values[i]
)
i += 1
def test_filter_bbox_label_based_on_minimum_dims(self):
"""Test that fitler_bbox_label_based_on_minimum_dims works as advertised."""
num_objects = np.random.randint(low=5, high=10)
# Choose some indices for which the minimum dimensions will be satisfied.
expected_valid_indices = np.random.choice(num_objects, 4, replace=False)
random_ltrb_coords = []
min_height, min_width = np.random.uniform(low=0.0, high=3.0, size=2)
for i in range(num_objects):
x1, y1 = np.random.uniform(low=0.0, high=100.0, size=2)
if i in expected_valid_indices:
# Create coordinates for which the minimum dimensions will be satisfied.
x2 = x1 + min_width + 1.0
y2 = y1 + min_height + 1.0
else:
# Create coordinates for which the minimum dimensions WILL NOT be satisfied.
x2 = x1 + min_width - 1.0
y2 = y1 + min_height - 1.0
random_ltrb_coords.extend([x1, y1, x2, y2])
# Now, cast it into a ``Bbox2DLabel``.
kwargs = {field_name: [] for field_name in Bbox2DLabel._fields}
kwargs["vertices"] = Coordinates2D(
coordinates=tf.SparseTensor(
values=tf.constant(random_ltrb_coords),
indices=[[i] for i in range(num_objects * 4)],
dense_shape=[num_objects * 4],
),
canvas_shape=Canvas2D(height=604, width=960),
)
bbox_2d_label = Bbox2DLabel(**kwargs)
with mock.patch(
"nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label.Bbox2DLabel.filter"
) as mocked_filter:
filter_bbox_label_based_on_minimum_dims(
bbox_2d_label=bbox_2d_label, min_height=min_height, min_width=min_width
)
# Now, check that filter was called correctly.
computed_valid_indices_tensor = mocked_filter.call_args[1].pop(
"valid_indices"
)
with self.session() as session:
computed_valid_indices = session.run(computed_valid_indices_tensor)
self.assertEqual(computed_valid_indices.shape, (num_objects,))
for idx, computed_validity in enumerate(computed_valid_indices):
expected_validity = idx in expected_valid_indices
self.assertEqual(computed_validity, expected_validity)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/bbox_2d_label_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for transforming tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.processors import values_and_count_to_sparse_tensor
def map_and_stack(fn, elements, dtype=tf.int64, name=None):
"""
Stack tensors returned by passed in function along the 0th/row dimension.
This function works similarly to tf.map_fn, with the difference that the passed in function may
return tensors that are of different height.
Args:
fn (function): Function that should accept single tf.Tensor as argument and return
a tf.Tensor.
elements (tf.Tensor): Tensor to iterate over from left to right.
dtype (tf.DType): Data type of the tensors returned by function.
name (string): Optional name for the operations.
Returns:
(tf.Tensor): A tensor of type `dtype` with as many rows as is the size of passed in
elements. The dimensionality of each row matches the dimensionality of tensors
returned by the passed in function.
"""
with tf.compat.v1.name_scope(name, "map_and_stack", values=[elements]):
elements_total = tf.size(input=elements)
result = tf.TensorArray(
dtype=dtype, size=elements_total, infer_shape=False, name="result"
)
def _body(element_index, result):
value = tf.cast(fn(element_index), dtype)
result = result.write(element_index, value)
return (element_index + 1, result)
def _condition(element_index, result):
return element_index < elements_total
_, final_result = tf.while_loop(
cond=_condition,
body=_body,
loop_vars=(0, result),
back_prop=False,
name="loop",
)
return tf.cond(
pred=tf.equal(elements_total, 0),
true_fn=lambda: tf.constant([], dtype=dtype, name="empty_result"),
false_fn=lambda: final_result.concat(name="result"),
)
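# Illustrative usage sketch (added for clarity; not part of the original module).
# It shows why map_and_stack exists: the mapped function below returns rows of
# different lengths, which tf.map_fn cannot stack, while map_and_stack simply
# concatenates them along the row dimension. The literal inputs are arbitrary.
def _example_map_and_stack():
    """Sketch: stack variable-length rows produced per element index."""
    elements = tf.constant([10, 20, 30])  # Only the element *count* (3) matters here.
    # For element index i, emit the i + 1 values [0, 1, ..., i].
    stacked = map_and_stack(lambda index: tf.range(index + 1), elements)
    with tf.compat.v1.Session() as sess:
        result = sess.run(stacked)
    # Expected: [0, 0, 1, 0, 1, 2] with dtype int64 (the default `dtype` argument).
    return result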
def vector_and_counts_to_sparse_tensor(vector, counts):
"""
Create a tf.SparseTensor representation of two dense vectors: vector and counts.
Dense vectors like this are typically used to represent variable length data as dense tensors.
E.g. you could have the following situation where an image has two polygon labels that each
have a different number of classes:
polygon 0 was labeled with 2 classes: "speed_limit_sign" and "60_MPH"
polygon 1 was labeled with 1 class: "person"
The dense vector representation of this would look like:
vector: tf.Tensor(["speed_limit_sign", "60_MPH", "person"])
counts: tf.Tensor([2, 1])
The first tensor ("vector") contains all class names and the second tensor ("counts") tells
you that the first two belong to the polygon at index 0 and the third one belongs to the
polygon at index 1.
This function encodes those two dense tensors as a single sparse tensor. The equivalent sparse
tensor representation would look like:
tf.SparseTensor(
values = ["speed_limit_sign", "60_MPH", "person"], # all values from `vector`
indices = [
[0, 0, 0], # 0th frame, 0th shape, 0th class (speed_limit_sign)
[0, 0, 1], # 0th frame, 0th shape, 1st class (60_MPH)
[0, 1, 0] # 0th frame, 1st shape, 0th class (person)
],
dense_shape = [1, 2, 2] # where values indicate 1: number of frames, 2: max number of
# shapes, 2: max number of classes.
)
Args:
vector (tf.Tensor): A dense tensor of shape [V].
counts (tf.Tensor): A dense tensor of shape [C].
Returns:
(tf.SparseTensor): A 3D sparse tensor representation of the 2 passed in dense tensors. The
sparse tensor consists of these 3 dense tensors that encode the inputs like:
values: 1D tensor of shape (V) - this tensor contains the values from the passed in
vector.
indices: 2D tensor of shape (T, 3), where T is the sum of all values in the passed
in counts vector. Each row in this tensor encodes what shape (polygon/line) a
value belongs to. The 0th column is always set to 0 as a performance
optimizaiton: this function is called from context where we are reading labels
for a single frame.
The dense_shape of the tensor is [V, E, C] where,
V: Total number of frames in the passed in vector - always 1.
E: Total number of "groups" that values belong to. (== len(counts))
C: Max number of values across all groups. (== max(counts))
"""
empty_sparse_tensor = tf.SparseTensor(
indices=tf.reshape(tf.constant([], tf.int64), [0, 2]),
values=tf.constant([], vector.dtype),
dense_shape=tf.constant([0, 0], tf.int64),
)
# Vectors are split into polygons (counts).
counts = tf.cast(counts, dtype=tf.int32)
regular_sparse_tensor = values_and_count_to_sparse_tensor(
values=vector, counts=counts
)
return tf.cond(
pred=tf.equal(tf.size(input=counts), tf.constant(0)),
true_fn=lambda: empty_sparse_tensor,
false_fn=lambda: regular_sparse_tensor,
)
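# Illustrative usage sketch (added for clarity; not part of the original module).
# It evaluates the docstring example above: three class names spread over two
# polygons. The exact index layout is produced by values_and_count_to_sparse_tensor,
# so the sketch returns the sparse components instead of asserting a fixed structure.
def _example_vector_and_counts_to_sparse_tensor():
    """Sketch: encode per-polygon class lists as a single sparse tensor."""
    classes = tf.constant(["speed_limit_sign", "60_MPH", "person"])
    counts = tf.constant([2, 1])  # Polygon 0 has two classes, polygon 1 has one.
    sparse_classes = vector_and_counts_to_sparse_tensor(classes, counts)
    with tf.compat.v1.Session() as sess:
        return sess.run(
            [sparse_classes.values, sparse_classes.indices, sparse_classes.dense_shape]
        )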
def sparsify_dense_coordinates(dense_coordinates, vertex_counts_per_polygon):
"""
Convert dense coordinates to sparse coordinates.
Args:
dense_coordinates (tf.Tensor): Tensor of shape [N, 2] and type tf.float32.
This tensor encodes a list of x, y coordinate pairs (vertices).
vertex_counts_per_polygon (tf.Tensor): Tensor of shape [P] where each element
indicates the number of vertices/coordinate-pairs that belong to each shape/polygon.
Returns:
(tf.SparseTensor): A 3D sparse tensor encoding the shape and coordinate information stored
in the dense tensors passed into this function. The shape of the tensor is
[S, V, C], where:
S: Shape - e.g. polygon, polyline or bounding box
V: Vertex (point) - e.g. a triangle has 3 vertices
C: Coordinate - x, y coordinate of each vertex.
"""
empty_sparse_tensor = tf.SparseTensor(
indices=tf.reshape(tf.constant([], tf.int64), [0, 3]),
values=tf.constant([], dense_coordinates.dtype),
dense_shape=tf.constant([0, 0, 0], tf.int64),
)
# Dense coordinates are split into polygons (counts).
counts = tf.cast(vertex_counts_per_polygon, dtype=tf.int32)
regular_sparse_tensor = values_and_count_to_sparse_tensor(
values=dense_coordinates, counts=counts
)
return tf.cond(
pred=tf.equal(tf.size(input=counts), tf.constant(0)),
true_fn=lambda: empty_sparse_tensor,
false_fn=lambda: regular_sparse_tensor,
)
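# Illustrative usage sketch (added for clarity; not part of the original module).
# Two triangles are encoded as a flat [N, 2] coordinate tensor plus per-polygon
# vertex counts; as above, the precise sparse layout comes from
# values_and_count_to_sparse_tensor, so the sketch just evaluates the result.
def _example_sparsify_dense_coordinates():
    """Sketch: sparsify six vertices belonging to two triangles."""
    dense_coordinates = tf.constant(
        [
            [0.0, 0.0], [4.0, 0.0], [2.0, 3.0],  # Triangle 0.
            [5.0, 5.0], [9.0, 5.0], [7.0, 8.0],  # Triangle 1.
        ],
        dtype=tf.float32,
    )
    vertex_counts_per_polygon = tf.constant([3, 3])
    sparse_coordinates = sparsify_dense_coordinates(
        dense_coordinates, vertex_counts_per_polygon
    )
    with tf.compat.v1.Session() as sess:
        return sess.run(sparse_coordinates)  # A tf.SparseTensorValue.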
| tao_tensorflow1_backend-main | nvidia_tao_tf1/blocks/multi_source_loader/types/tensor_transforms.py |