"""
File contains the standard library of Python 3.7.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
stdlib = {
"_ast",
"_dummy_thread",
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"dummy_threading",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"macpath",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
}
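# isort consults generated sets like this one to classify imports as STDLIB.
# A minimal illustrative check (not part of the generated file):
#
#     "dataclasses" in stdlib  # True for 3.7; absent from the 3.5 set below
#     "zoneinfo" in stdlib     # False for 3.7; present in the 3.10 set below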
|
"""
File contains the standard library of Python 3.10.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
stdlib = {
"_ast",
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"graphlib",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
"zoneinfo",
}
|
"""
File contains the standard library of Python 3.5.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
stdlib = {
"_ast",
"_dummy_thread",
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"dummy_threading",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fpectl",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"macpath",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
}
|
"""Finders try to find right section for passed module name"""
import importlib.machinery
import inspect
import os
import os.path
import re
import sys
import sysconfig
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from fnmatch import fnmatch
from functools import lru_cache
from glob import glob
from pathlib import Path
from typing import Dict, Iterable, Iterator, List, Optional, Pattern, Sequence, Tuple, Type
from isort import sections
from isort.settings import KNOWN_SECTION_MAPPING, Config
from isort.utils import exists_case_sensitive
try:
from pipreqs import pipreqs # type: ignore
except ImportError:
pipreqs = None
try:
from pip_api import parse_requirements # type: ignore
except ImportError:
parse_requirements = None
try:
from requirementslib import Pipfile # type: ignore
except ImportError:
Pipfile = None
@contextmanager
def chdir(path: str) -> Iterator[None]:
"""Context manager for changing dir and restoring previous workdir after exit."""
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
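# Illustrative usage of chdir (the path "some/dir" is hypothetical):
#
#     with chdir("some/dir"):
#         ...  # the working directory is "some/dir" inside the block
#     # the previous working directory is restored here, even on error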
class BaseFinder(metaclass=ABCMeta):
def __init__(self, config: Config) -> None:
self.config = config
@abstractmethod
def find(self, module_name: str) -> Optional[str]:
raise NotImplementedError
class ForcedSeparateFinder(BaseFinder):
def find(self, module_name: str) -> Optional[str]:
for forced_separate in self.config.forced_separate:
# Ensure all forced_separate patterns will match to end of string
path_glob = forced_separate
if not forced_separate.endswith("*"):
path_glob = "%s*" % forced_separate
if fnmatch(module_name, path_glob) or fnmatch(module_name, "." + path_glob):
return forced_separate
return None
class LocalFinder(BaseFinder):
def find(self, module_name: str) -> Optional[str]:
if module_name.startswith("."):
return "LOCALFOLDER"
return None
class KnownPatternFinder(BaseFinder):
def __init__(self, config: Config) -> None:
super().__init__(config)
self.known_patterns: List[Tuple[Pattern[str], str]] = []
for placement in reversed(config.sections):
known_placement = KNOWN_SECTION_MAPPING.get(placement, placement).lower()
config_key = f"known_{known_placement}"
known_patterns = list(
getattr(self.config, config_key, self.config.known_other.get(known_placement, []))
)
known_patterns = [
pattern
for known_pattern in known_patterns
for pattern in self._parse_known_pattern(known_pattern)
]
for known_pattern in known_patterns:
regexp = "^" + known_pattern.replace("*", ".*").replace("?", ".?") + "$"
self.known_patterns.append((re.compile(regexp), placement))
def _parse_known_pattern(self, pattern: str) -> List[str]:
"""Expand pattern if identified as a directory and return found sub packages"""
if pattern.endswith(os.path.sep):
patterns = [
filename
for filename in os.listdir(os.path.join(self.config.directory, pattern))
if os.path.isdir(os.path.join(self.config.directory, pattern, filename))
]
else:
patterns = [pattern]
return patterns
def find(self, module_name: str) -> Optional[str]:
# Try to find most specific placement instruction match (if any)
parts = module_name.split(".")
module_names_to_check = (".".join(parts[:first_k]) for first_k in range(len(parts), 0, -1))
for module_name_to_check in module_names_to_check:
for pattern, placement in self.known_patterns:
if pattern.match(module_name_to_check):
return placement
return None
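# Illustrative behaviour (assuming a config with known_first_party=["myapp"]):
# find("myapp.models.user") checks "myapp.models.user", then "myapp.models",
# then "myapp", so the most specific placement instruction wins.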
class PathFinder(BaseFinder):
def __init__(self, config: Config, path: str = ".") -> None:
super().__init__(config)
# restore the original import path (i.e. not the path to bin/isort)
root_dir = os.path.abspath(path)
src_dir = f"{root_dir}/src"
self.paths = [root_dir, src_dir]
# virtual env
self.virtual_env = self.config.virtual_env or os.environ.get("VIRTUAL_ENV")
if self.virtual_env:
self.virtual_env = os.path.realpath(self.virtual_env)
self.virtual_env_src = ""
if self.virtual_env:
self.virtual_env_src = f"{self.virtual_env}/src/"
for venv_path in glob(f"{self.virtual_env}/lib/python*/site-packages"):
if venv_path not in self.paths:
self.paths.append(venv_path)
for nested_venv_path in glob(f"{self.virtual_env}/lib/python*/*/site-packages"):
if nested_venv_path not in self.paths:
self.paths.append(nested_venv_path)
for venv_src_path in glob(f"{self.virtual_env}/src/*"):
if os.path.isdir(venv_src_path):
self.paths.append(venv_src_path)
# conda
self.conda_env = self.config.conda_env or os.environ.get("CONDA_PREFIX") or ""
if self.conda_env:
self.conda_env = os.path.realpath(self.conda_env)
for conda_path in glob(f"{self.conda_env}/lib/python*/site-packages"):
if conda_path not in self.paths:
self.paths.append(conda_path)
for nested_conda_path in glob(f"{self.conda_env}/lib/python*/*/site-packages"):
if nested_conda_path not in self.paths:
self.paths.append(nested_conda_path)
# handle case-insensitive paths on windows
self.stdlib_lib_prefix = os.path.normcase(sysconfig.get_paths()["stdlib"])
if self.stdlib_lib_prefix not in self.paths:
self.paths.append(self.stdlib_lib_prefix)
# add system paths
for system_path in sys.path[1:]:
if system_path not in self.paths:
self.paths.append(system_path)
def find(self, module_name: str) -> Optional[str]:
for prefix in self.paths:
package_path = "/".join((prefix, module_name.split(".")[0]))
path_obj = Path(package_path).resolve()
is_module = (
exists_case_sensitive(package_path + ".py")
or any(
exists_case_sensitive(package_path + ext_suffix)
for ext_suffix in importlib.machinery.EXTENSION_SUFFIXES
)
or exists_case_sensitive(package_path + "/__init__.py")
)
is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path)
if is_module or is_package:
if (
"site-packages" in prefix
or "dist-packages" in prefix
or (self.virtual_env and self.virtual_env_src in prefix)
):
return sections.THIRDPARTY
if os.path.normcase(prefix) == self.stdlib_lib_prefix:
return sections.STDLIB
if self.conda_env and self.conda_env in prefix:
return sections.THIRDPARTY
for src_path in self.config.src_paths:
if src_path in path_obj.parents and not self.config.is_skipped(path_obj):
return sections.FIRSTPARTY
if os.path.normcase(prefix).startswith(self.stdlib_lib_prefix):
return sections.STDLIB # pragma: no cover - edge case for one OS. Hard to test.
return self.config.default_section
return None
class ReqsBaseFinder(BaseFinder):
enabled = False
def __init__(self, config: Config, path: str = ".") -> None:
super().__init__(config)
self.path = path
if self.enabled:
self.mapping = self._load_mapping()
self.names = self._load_names()
@abstractmethod
def _get_names(self, path: str) -> Iterator[str]:
raise NotImplementedError
@abstractmethod
def _get_files_from_dir(self, path: str) -> Iterator[str]:
raise NotImplementedError
@staticmethod
def _load_mapping() -> Optional[Dict[str, str]]:
"""Return list of mappings `package_name -> module_name`
Example:
django-haystack -> haystack
"""
if not pipreqs:
return None
path = os.path.dirname(inspect.getfile(pipreqs))
path = os.path.join(path, "mapping")
with open(path) as f:
mappings: Dict[str, str] = {} # pypi_name: import_name
for line in f:
import_name, _, pypi_name = line.strip().partition(":")
mappings[pypi_name] = import_name
return mappings
# return dict(tuple(line.strip().split(":")[::-1]) for line in f)
def _load_names(self) -> List[str]:
"""Return list of thirdparty modules from requirements"""
names = []
for path in self._get_files():
for name in self._get_names(path):
names.append(self._normalize_name(name))
return names
@staticmethod
def _get_parents(path: str) -> Iterator[str]:
prev = ""
while path != prev:
prev = path
yield path
path = os.path.dirname(path)
def _get_files(self) -> Iterator[str]:
"""Return paths to all requirements files"""
path = os.path.abspath(self.path)
if os.path.isfile(path):
path = os.path.dirname(path)
for path in self._get_parents(path):
yield from self._get_files_from_dir(path)
def _normalize_name(self, name: str) -> str:
"""Convert package name to module name
Examples:
Django -> django
django-haystack -> django_haystack
Flask-RESTFul -> flask_restful
"""
if self.mapping:
name = self.mapping.get(name.replace("-", "_"), name)
return name.lower().replace("-", "_")
def find(self, module_name: str) -> Optional[str]:
# required lib not installed yet
if not self.enabled:
return None
module_name, _sep, _submodules = module_name.partition(".")
module_name = module_name.lower()
if not module_name:
return None
for name in self.names:
if module_name == name:
return sections.THIRDPARTY
return None
class RequirementsFinder(ReqsBaseFinder):
exts = (".txt", ".in")
enabled = bool(parse_requirements)
def _get_files_from_dir(self, path: str) -> Iterator[str]:
"""Return paths to requirements files from passed dir."""
yield from self._get_files_from_dir_cached(path)
@classmethod
@lru_cache(maxsize=16)
def _get_files_from_dir_cached(cls, path: str) -> List[str]:
results = []
for fname in os.listdir(path):
if "requirements" not in fname:
continue
full_path = os.path.join(path, fname)
# *requirements*/*.{txt,in}
if os.path.isdir(full_path):
for subfile_name in os.listdir(full_path):
for ext in cls.exts:
if subfile_name.endswith(ext):
results.append(os.path.join(full_path, subfile_name))
continue
# *requirements*.{txt,in}
if os.path.isfile(full_path):
for ext in cls.exts:
if fname.endswith(ext):
results.append(full_path)
break
return results
def _get_names(self, path: str) -> Iterator[str]:
"""Load required packages from path to requirements file"""
yield from self._get_names_cached(path)
@classmethod
@lru_cache(maxsize=16)
def _get_names_cached(cls, path: str) -> List[str]:
result = []
with chdir(os.path.dirname(path)):
requirements = parse_requirements(path)
for req in requirements.values():
if req.name:
result.append(req.name)
return result
class PipfileFinder(ReqsBaseFinder):
enabled = bool(Pipfile)
def _get_names(self, path: str) -> Iterator[str]:
with chdir(path):
project = Pipfile.load(path)
for req in project.packages:
yield req.name
def _get_files_from_dir(self, path: str) -> Iterator[str]:
if "Pipfile" in os.listdir(path):
yield path
class DefaultFinder(BaseFinder):
def find(self, module_name: str) -> Optional[str]:
return self.config.default_section
class FindersManager:
_default_finders_classes: Sequence[Type[BaseFinder]] = (
ForcedSeparateFinder,
LocalFinder,
KnownPatternFinder,
PathFinder,
PipfileFinder,
RequirementsFinder,
DefaultFinder,
)
def __init__(
self, config: Config, finder_classes: Optional[Iterable[Type[BaseFinder]]] = None
) -> None:
self.verbose: bool = config.verbose
if finder_classes is None:
finder_classes = self._default_finders_classes
finders: List[BaseFinder] = []
for finder_cls in finder_classes:
try:
finders.append(finder_cls(config))
except Exception as exception:
# if one finder fails to instantiate isort can continue using the rest
if self.verbose:
print(
(
f"{finder_cls.__name__} encountered an error ({exception}) during "
"instantiation and cannot be used"
)
)
self.finders: Tuple[BaseFinder, ...] = tuple(finders)
def find(self, module_name: str) -> Optional[str]:
for finder in self.finders:
try:
section = finder.find(module_name)
if section is not None:
return section
except Exception as exception:
# isort has to be able to keep trying to identify the correct
# import section even if one approach fails
if self.verbose:
print(
f"{finder.__class__.__name__} encountered an error ({exception}) while "
f"trying to identify the {module_name} module"
)
return None
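# A minimal usage sketch (assumes isort is importable; the results shown are
# illustrative, since they depend on the local environment):
#
#     from isort.settings import Config
#     manager = FindersManager(Config())
#     manager.find("os")        # -> "STDLIB"
#     manager.find("requests")  # -> "THIRDPARTY" when installed in site-packages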
|
from natsort import natsorted
def natural_plus(*args, **kwargs) -> str:
"""An even more natural sorting order for isort using natsort."""
return natsorted(*args, **kwargs)
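# Illustrative usage (requires the third-party natsort package):
#
#     natural_plus(["isort10.py", "isort2.py"])  # -> ["isort2.py", "isort10.py"]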
|
#!/usr/bin/env python
import os
from textwrap import dedent
from typing import Any, Dict, Generator, Iterable, Optional, Type
from isort._future import dataclass
from isort.main import _build_arg_parser
from isort.settings import _DEFAULT_SETTINGS as config
OUTPUT_FILE = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../docs/configuration/options.md")
)
MD_NEWLINE = " "
HUMAN_NAME = {
"py_version": "Python Version",
"vn": "Version Number",
"str": "String",
"frozenset": "List of Strings",
"tuple": "List of Strings",
}
CONFIG_DEFAULTS = {"False": "false", "True": "true", "None": ""}
DESCRIPTIONS = {}
IGNORED = {"source", "help", "sources", "directory"}
COLUMNS = ["Name", "Type", "Default", "Python / Config file", "CLI", "Description"]
HEADER = """# Configuration options for isort
As a code formatter isort has opinions. However, it also allows you to have your own. If your opinions disagree with those of isort,
isort will disagree but commit to your way of formatting. To enable this, isort exposes a plethora of options to specify
how you want your imports sorted, organized, and formatted.
Too busy to build your perfect isort configuration? For curated common configurations, see isort's [built-in
profiles](https://pycqa.github.io/isort/docs/configuration/profiles.html).
"""
parser = _build_arg_parser()
@dataclass
class Example:
section_complete: str = ""
cfg: str = ""
pyproject_toml: str = ""
cli: str = ""
def __post_init__(self):
if self.cfg or self.pyproject_toml or self.cli:
if self.cfg:
cfg = dedent(self.cfg).lstrip()
self.cfg = (
dedent(
"""
### Example `.isort.cfg`
```
[settings]
{cfg}
```
"""
)
.format(cfg=cfg)
.lstrip()
)
if self.pyproject_toml:
pyproject_toml = dedent(self.pyproject_toml).lstrip()
self.pyproject_toml = (
dedent(
"""
### Example `pyproject.toml`
```
[tool.isort]
{pyproject_toml}
```
"""
)
.format(pyproject_toml=pyproject_toml)
.lstrip()
)
if self.cli:
cli = dedent(self.cli).lstrip()
self.cli = (
dedent(
"""
### Example cli usage
`{cli}`
"""
)
.format(cli=cli)
.lstrip()
)
sections = [s for s in [self.cfg, self.pyproject_toml, self.cli] if s]
sections_str = "\n".join(sections)
self.section_complete = f"""**Examples:**
{sections_str}"""
else:
self.section_complete = ""
def __str__(self):
return self.section_complete
description_mapping: Dict[str, str]
description_mapping = {
"length_sort_sections": "Sort the given sections by length",
"forced_separate": "Force certain sub modules to show separately",
"sections": "What sections isort should display imports for and in what order",
"known_other": "known_OTHER is how imports of custom sections are defined. "
"OTHER is a placeholder for the custom section name.",
"comment_prefix": "Allows customizing how isort prefixes comments that it adds or modifies on import lines"
"Generally ` #` (two spaces before a pound symbol) is use, though one space is also common.",
"lines_before_imports": "The number of blank lines to place before imports. -1 for automatic determination",
"lines_after_imports": "The number of blank lines to place after imports. -1 for automatic determination",
"lines_between_sections": "The number of lines to place between sections",
"lines_between_types": "The number of lines to place between direct and from imports",
"lexicographical": "Lexicographical order is strictly alphabetical order. "
"For example by default isort will sort `1, 10, 2` into `1, 2, 10` - but with "
"lexicographical sorting enabled it will remain `1, 10, 2`.",
"ignore_comments": "If enabled, isort will strip comments that exist within import lines.",
"constants": "An override list of tokens to always recognize as a CONSTANT for order_by_type regardless of casing.",
"classes": "An override list of tokens to always recognize as a Class for order_by_type regardless of casing.",
"variables": "An override list of tokens to always recognize as a var for order_by_type regardless of casing.",
"auto_identify_namespace_packages": "Automatically determine local namespace packages, generally by lack of any src files before a src containing directory.",
"namespace_packages": "Manually specify one or more namespace packages.",
"follow_links": "If `True` isort will follow symbolic links when doing recursive sorting.",
"git_ignore": "If `True` isort will honor ignores within locally defined .git_ignore files.",
"formatting_function": "The fully qualified Python path of a function to apply to format code sorted by isort.",
"group_by_package": "If `True` isort will automatically create section groups by the top-level package they come from.",
"indented_import_headings": "If `True` isort will apply import headings to indended imports the same way it does unindented ones.",
"import_headings": "A mapping of import sections to import heading comments that should show above them.",
"import_footers": "A mapping of import sections to import footer comments that should show below them.",
}
example_mapping: Dict[str, Example]
example_mapping = {
"skip": Example(
cfg="""
skip=.gitignore,.dockerignore""",
pyproject_toml="""
skip = [".gitignore", ".dockerignore"]
""",
),
"extend_skip": Example(
cfg="""
extend_skip=.md,.json""",
pyproject_toml="""
extend_skip = [".md", ".json"]
""",
),
"skip_glob": Example(
cfg="""
skip_glob=docs/*
""",
pyproject_toml="""
skip_glob = ["docs/*"]
""",
),
"extend_skip_glob": Example(
cfg="""
extend_skip_glob=my_*_module.py,test/*
""",
pyproject_toml="""
extend_skip_glob = ["my_*_module.py", "test/*"]
""",
),
"known_third_party": Example(
cfg="""
known_third_party=my_module1,my_module2
""",
pyproject_toml="""
known_third_party = ["my_module1", "my_module2"]
""",
),
"known_first_party": Example(
cfg="""
known_first_party=my_module1,my_module2
""",
pyproject_toml="""
known_first_party = ["my_module1", "my_module2"]
""",
),
"known_local_folder": Example(
cfg="""
known_local_folder=my_module1,my_module2
""",
pyproject_toml="""
known_local_folder = ["my_module1", "my_module2"]
""",
),
"known_standard_library": Example(
cfg="""
known_standard_library=my_module1,my_module2
""",
pyproject_toml="""
known_standard_library = ["my_module1", "my_module2"]
""",
),
"extra_standard_library": Example(
cfg="""
extra_standard_library=my_module1,my_module2
""",
pyproject_toml="""
extra_standard_library = ["my_module1", "my_module2"]
""",
),
"forced_separate": Example(
cfg="""
forced_separate=glob_exp1,glob_exp2
""",
pyproject_toml="""
forced_separate = ["glob_exp1", "glob_exp2"]
""",
),
"length_sort_sections": Example(
cfg="""
length_sort_sections=future,stdlib
""",
pyproject_toml="""
length_sort_sections = ["future", "stdlib"]
""",
),
"add_imports": Example(
cfg="""
add_imports=import os,import json
""",
pyproject_toml="""
add_imports = ["import os", "import json"]
""",
),
"remove_imports": Example(
cfg="""
remove_imports=os,json
""",
pyproject_toml="""
remove_imports = ["os", "json"]
""",
),
"single_line_exclusions": Example(
cfg="""
single_line_exclusions=os,json
""",
pyproject_toml="""
single_line_exclusions = ["os", "json"]
""",
),
"no_lines_before": Example(
cfg="""
no_lines_before=future,stdlib
""",
pyproject_toml="""
no_lines_before = ["future", "stdlib"]
""",
),
"src_paths": Example(
cfg="""
src_paths = src,tests
""",
pyproject_toml="""
src_paths = ["src", "tests"]
""",
),
"treat_comments_as_code": Example(
cfg="""
treat_comments_as_code = # my comment 1, # my other comment
""",
pyproject_toml="""
treat_comments_as_code = ["# my comment 1", "# my other comment"]
""",
),
"supported_extensions": Example(
cfg="""
supported_extensions=pyw,ext
""",
pyproject_toml="""
supported_extensions = ["pyw", "ext"]
""",
),
"blocked_extensions": Example(
cfg="""
blocked_extensions=pyw,pyc
""",
pyproject_toml="""
blocked_extensions = ["pyw", "pyc"]
""",
),
"known_other": Example(
cfg="""
sections=FUTURE,STDLIB,THIRDPARTY,AIRFLOW,FIRSTPARTY,LOCALFOLDER
known_airflow=airflow""",
pyproject_toml="""
sections = ['FUTURE', 'STDLIB', 'THIRDPARTY', 'AIRFLOW', 'FIRSTPARTY', 'LOCALFOLDER']
known_airflow = ['airflow']""",
),
"multi_line_output": Example(cfg="multi_line_output=3", pyproject_toml="multi_line_output = 3"),
"show_version": Example(cli="isort --version"),
"py_version": Example(
cli="isort --py 39",
pyproject_toml="""
py_version=39
""",
cfg="""
py_version=39
""",
),
}
@dataclass
class ConfigOption:
name: str
type: Type = str
default: Any = ""
config_name: str = "**Not Supported**"
cli_options: Iterable[str] = (" **Not Supported**",)
description: str = "**No Description**"
example: Optional[Example] = None
def __str__(self):
if self.name in IGNORED:
return ""
if self.cli_options == (" **Not Supported**",):
cli_options = self.cli_options[0]
else:
cli_options = "\n\n- " + "\n- ".join(self.cli_options)
# new line if example otherwise nothing
example = f"\n{self.example}" if self.example else ""
return f"""
## {human(self.name)}
{self.description}
**Type:** {human(self.type.__name__)}{MD_NEWLINE}
**Default:** `{str(self.default) or " "}`{MD_NEWLINE}
**Config default:** `{config_default(self.default) or " "}`{MD_NEWLINE}
**Python & Config File Name:** {self.config_name}{MD_NEWLINE}
**CLI Flags:**{cli_options}
{example}"""
def config_default(default: Any) -> str:
if isinstance(default, (frozenset, tuple)):
default = list(default)
default_str = str(default)
if default_str in CONFIG_DEFAULTS:
return CONFIG_DEFAULTS[default_str]
if default_str.startswith("py"):
return default_str[2:]
return default_str
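# Illustrative results: config_default(True) -> "true",
# config_default("py3") -> "3", and config_default(frozenset()) -> "[]".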
def human(name: str) -> str:
if name in HUMAN_NAME:
return HUMAN_NAME[name]
return " ".join(
part if part in ("of",) else part.capitalize() for part in name.replace("-", "_").split("_")
)
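# Illustrative results: human("py_version") -> "Python Version" (via HUMAN_NAME),
# human("force_single_line") -> "Force Single Line".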
def config_options() -> Generator[ConfigOption, None, None]:
cli_actions = {action.dest: action for action in parser._actions}
for name, default in config.items():
extra_kwargs = {}
description: Optional[str] = description_mapping.get(name, None)
cli = cli_actions.pop(name, None)
if cli:
extra_kwargs["cli_options"] = cli.option_strings
if cli.help and not description:
description = cli.help
default_display = default
if isinstance(default, (set, frozenset)) and len(default) > 0:
default_display = tuple(sorted(default))
# todo: refactor place for example params
# needs to integrate with isort/settings/_Config
# needs to integrate with isort/main/_build_arg_parser
yield ConfigOption(
name=name,
type=type(default),
default=default_display,
config_name=name,
description=description or "**No Description**",
example=example_mapping.get(name, None),
**extra_kwargs,
)
for name, cli in cli_actions.items():
extra_kwargs = {}
description: Optional[str] = description_mapping.get(name, None)
if cli.type:
extra_kwargs["type"] = cli.type
elif cli.default is not None:
extra_kwargs["type"] = type(cli.default)
if cli.help and not description:
description = cli.help
yield ConfigOption(
name=name,
default=cli.default,
cli_options=cli.option_strings,
example=example_mapping.get(name, None),
description=description or "**No Description**",
**extra_kwargs,
)
def document_text() -> str:
return f"{HEADER}{''.join(str(config_option) for config_option in config_options())}"
def write_document():
with open(OUTPUT_FILE, "w") as output_file:
output_file.write(document_text())
if __name__ == "__main__":
write_document()
|
#!/usr/bin/env python3
from sphinx.ext.intersphinx import fetch_inventory
URL = "https://docs.python.org/{}/objects.inv"
PATH = "isort/stdlibs/py{}.py"
VERSIONS = [("2", "7"), ("3", "5"), ("3", "6"), ("3", "7"), ("3", "8"), ("3", "9"), ("3", "10")]
DOCSTRING = """
File contains the standard library of Python {}.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
class FakeConfig:
intersphinx_timeout = None
tls_verify = True
user_agent = ""
class FakeApp:
srcdir = ""
config = FakeConfig()
for version_info in VERSIONS:
version = ".".join(version_info)
url = URL.format(version)
invdata = fetch_inventory(FakeApp(), "", url)
# Any modules we want to enforce as stdlib across all Python versions can be included in the set initializer
modules = {"_ast", "posixpath", "ntpath", "sre_constants", "sre_parse", "sre_compile", "sre"}
for module in invdata["py:module"]:
root, *_ = module.split(".")
if root not in ["__future__", "__main__"]:
modules.add(root)
path = PATH.format("".join(version_info))
with open(path, "w") as stdlib_file:
docstring = DOCSTRING.format(version)
stdlib_file.write(f'"""{docstring}"""\n\n')
stdlib_file.write("stdlib = {\n")
for module in sorted(modules):
stdlib_file.write(f' "{module}",\n')
stdlib_file.write("}\n")
|
#!/usr/bin/env python3
import asyncio
import sys
from getpass import getpass
from pathlib import Path
from typing import Dict
import httpx
import hug
IGNORED_AUTHOR_LOGINS = {"deepsource-autofix[bot]"}
REPO = "pycqa/isort"
GITHUB_API_CONTRIBUTORS = f"https://api.github.com/repos/{REPO}/contributors"
GITHUB_USER_CONTRIBUTIONS = f"https://github.com/{REPO}/commits?author="
GITHUB_USER_TYPE = "User"
USER_DELIMITER = "-" * 80
PER_PAGE = 100
_ACK_FILE = Path(__file__).parent.parent / "docs" / "contributing" / "4.-acknowledgements.md"
ACKNOWLEDGEMENTS = _ACK_FILE.read_text().lower()
def _user_info(user: Dict[str, str], verbose=False) -> str:
login = "@" + user["login"]
name = user.get("name")
display_name = f"{name} ({login})" if name else login
user_info = f"- {display_name}"
if verbose:
contributions = f" {GITHUB_USER_CONTRIBUTIONS}{user['login']}"
user_info += "\n" + contributions
return user_info
@hug.cli()
async def main():
auth = (input("Github Username: "), getpass())
async with httpx.AsyncClient() as client:
page = 0
results = []
contributors = []
while not page or len(results) == PER_PAGE:
page += 1
response = await client.get(
f"{GITHUB_API_CONTRIBUTORS}?per_page={PER_PAGE}&page={page}", auth=auth
)
results = response.json()
contributors.extend(
(
contributor
for contributor in results
if contributor["type"] == GITHUB_USER_TYPE
and contributor["login"] not in IGNORED_AUTHOR_LOGINS
and f"@{contributor['login'].lower()}" not in ACKNOWLEDGEMENTS
)
)
unacknowledged_users = await asyncio.gather(
*(client.get(contributor["url"], auth=auth) for contributor in contributors)
)
unacknowledged_users = [request.json() for request in unacknowledged_users]
if not unacknowledged_users:
sys.exit()
print("Found unacknowledged authors:")
print()
for user in unacknowledged_users:
print(_user_info(user, verbose=True))
print(USER_DELIMITER)
print()
print("Printing again for easy inclusion in Markdown file:")
print()
for user in unacknowledged_users:
print(_user_info(user))
sys.exit(1)
if __name__ == "__main__":
main.interface.cli()
|
#!/usr/bin/env python
import os
from typing import Any, Dict, Generator, Iterable, Type
from isort.profiles import profiles
OUTPUT_FILE = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../docs/configuration/profiles.md")
)
HEADER = """Built-in Profile for isort
========
The following profiles are built into isort to allow easy interoperability with
common projects and code styles.
To use any of the listed profiles, use `isort --profile PROFILE_NAME` from the command line, or `profile=PROFILE_NAME` in your configuration file.
"""
def format_profile(profile_name: str, profile: Dict[str, Any]) -> str:
options = "\n".join(f" - **{name}**: `{repr(value)}`" for name, value in profile.items())
return f"""
# {profile_name}
{profile.get('description', '')}
{options}
"""
def document_text() -> str:
return f"{HEADER}{''.join(format_profile(profile_name, profile) for profile_name, profile in profiles.items())}"
def write_document():
with open(OUTPUT_FILE, "w") as output_file:
output_file.write(document_text())
if __name__ == "__main__":
write_document()
|
PROFILE = {
"multi_line_output": 3,
"include_trailing_comma": True,
"force_grid_wrap": 0,
"use_parentheses": True,
"line_length": 100,
}
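# A hedged sketch of consuming a profile like this via the isort Python API
# (isort.code accepts settings as keyword arguments in isort 5+):
#
#     import isort
#     sorted_source = isort.code("import b\nimport a\n", **PROFILE)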
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.spawn import find_executable
from distutils import sysconfig, log
import setuptools
import setuptools.command.build_py
import setuptools.command.develop
import setuptools.command.build_ext
from collections import namedtuple
from contextlib import contextmanager
import glob
import os
import shlex
import subprocess
import sys
import platform
from textwrap import dedent
import multiprocessing
TOP_DIR = os.path.realpath(os.path.dirname(__file__))
SRC_DIR = os.path.join(TOP_DIR, 'onnx')
TP_DIR = os.path.join(TOP_DIR, 'third_party')
CMAKE_BUILD_DIR = os.path.join(TOP_DIR, '.setuptools-cmake-build')
WINDOWS = (os.name == 'nt')
CMAKE = find_executable('cmake3') or find_executable('cmake')
MAKE = find_executable('make')
install_requires = []
setup_requires = []
tests_require = []
extras_require = {}
################################################################################
# Global variables for controlling the build variant
################################################################################
# Default value is set to True/1 to keep the settings the same as the current ones.
# However, going forward the recommended way is to set this to False/0.
USE_MSVC_STATIC_RUNTIME = bool(os.getenv('USE_MSVC_STATIC_RUNTIME', '1') == '1')
ONNX_ML = not bool(os.getenv('ONNX_ML') == '0')
ONNX_VERIFY_PROTO3 = bool(os.getenv('ONNX_VERIFY_PROTO3') == '1')
ONNX_NAMESPACE = os.getenv('ONNX_NAMESPACE', 'onnx')
ONNX_BUILD_TESTS = bool(os.getenv('ONNX_BUILD_TESTS') == '1')
DEBUG = bool(os.getenv('DEBUG'))
COVERAGE = bool(os.getenv('COVERAGE'))
################################################################################
# Version
################################################################################
try:
git_version = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
cwd=TOP_DIR).decode('ascii').strip()
except (OSError, subprocess.CalledProcessError):
git_version = None
with open(os.path.join(TOP_DIR, 'VERSION_NUMBER')) as version_file:
VersionInfo = namedtuple('VersionInfo', ['version', 'git_version'])(
version=version_file.read().strip(),
git_version=git_version
)
################################################################################
# Pre Check
################################################################################
assert CMAKE, 'Could not find "cmake" executable!'
################################################################################
# Utilities
################################################################################
@contextmanager
def cd(path):
if not os.path.isabs(path):
raise RuntimeError('Can only cd to absolute path, got: {}'.format(path))
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
################################################################################
# Customized commands
################################################################################
class ONNXCommand(setuptools.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class create_version(ONNXCommand):
def run(self):
with open(os.path.join(SRC_DIR, 'version.py'), 'w') as f:
f.write(dedent('''\
# This file is generated by setup.py. DO NOT EDIT!
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
version = '{version}'
git_version = '{git_version}'
'''.format(**dict(VersionInfo._asdict()))))
class cmake_build(setuptools.Command):
"""
Compiles everything when `python setup.py build` is run using cmake.
Custom args can be passed to cmake by specifying the `CMAKE_ARGS`
environment variable.
The number of CPUs used by `make` can be specified by passing `-j<ncpus>`
to `setup.py build`. By default all CPUs are used.
"""
user_options = [
(str('jobs='), str('j'), str('Specifies the number of jobs to use with make'))
]
built = False
def initialize_options(self):
self.jobs = multiprocessing.cpu_count()
def finalize_options(self):
self.jobs = int(self.jobs)
def run(self):
if cmake_build.built:
return
cmake_build.built = True
if not os.path.exists(CMAKE_BUILD_DIR):
os.makedirs(CMAKE_BUILD_DIR)
with cd(CMAKE_BUILD_DIR):
build_type = 'Release'
# configure
cmake_args = [
CMAKE,
'-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_python_inc()),
'-DPYTHON_EXECUTABLE={}'.format(sys.executable),
'-DBUILD_ONNX_PYTHON=ON',
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DONNX_NAMESPACE={}'.format(ONNX_NAMESPACE),
'-DPY_EXT_SUFFIX={}'.format(sysconfig.get_config_var('EXT_SUFFIX') or ''),
]
if COVERAGE:
cmake_args.append('-DONNX_COVERAGE=ON')
if COVERAGE or DEBUG:
# in order to get accurate coverage information, the
# build needs to turn off optimizations
build_type = 'Debug'
cmake_args.append('-DCMAKE_BUILD_TYPE=%s' % build_type)
if WINDOWS:
cmake_args.extend([
# we need to link with libpython on windows, so
# passing python version to window in order to
# find python in cmake
'-DPY_VERSION={}'.format('{0}.{1}'.format(*sys.version_info[:2])),
])
if USE_MSVC_STATIC_RUNTIME:
cmake_args.append('-DONNX_USE_MSVC_STATIC_RUNTIME=ON')
if platform.architecture()[0] == '64bit':
cmake_args.extend(['-A', 'x64', '-T', 'host=x64'])
else:
cmake_args.extend(['-A', 'Win32', '-T', 'host=x86'])
if ONNX_ML:
cmake_args.append('-DONNX_ML=1')
if ONNX_VERIFY_PROTO3:
cmake_args.append('-DONNX_VERIFY_PROTO3=1')
if ONNX_BUILD_TESTS:
cmake_args.append('-DONNX_BUILD_TESTS=ON')
if 'CMAKE_ARGS' in os.environ:
extra_cmake_args = shlex.split(os.environ['CMAKE_ARGS'])
# prevent crossfire with downstream scripts
del os.environ['CMAKE_ARGS']
log.info('Extra cmake args: {}'.format(extra_cmake_args))
cmake_args.extend(extra_cmake_args)
cmake_args.append(TOP_DIR)
subprocess.check_call(cmake_args)
build_args = [CMAKE, '--build', os.curdir]
if WINDOWS:
build_args.extend(['--config', build_type])
build_args.extend(['--', '/maxcpucount:{}'.format(self.jobs)])
else:
build_args.extend(['--', '-j', str(self.jobs)])
subprocess.check_call(build_args)
class build_py(setuptools.command.build_py.build_py):
def run(self):
self.run_command('create_version')
self.run_command('cmake_build')
generated_python_files = \
glob.glob(os.path.join(CMAKE_BUILD_DIR, 'onnx', '*.py')) + \
glob.glob(os.path.join(CMAKE_BUILD_DIR, 'onnx', '*.pyi'))
for src in generated_python_files:
dst = os.path.join(
TOP_DIR, os.path.relpath(src, CMAKE_BUILD_DIR))
self.copy_file(src, dst)
return setuptools.command.build_py.build_py.run(self)
class develop(setuptools.command.develop.develop):
def run(self):
self.run_command('build_py')
setuptools.command.develop.develop.run(self)
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
self.run_command('cmake_build')
setuptools.command.build_ext.build_ext.run(self)
def build_extensions(self):
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = os.path.basename(self.get_ext_filename(fullname))
lib_path = CMAKE_BUILD_DIR
if os.name == 'nt':
debug_lib_dir = os.path.join(lib_path, "Debug")
release_lib_dir = os.path.join(lib_path, "Release")
if os.path.exists(debug_lib_dir):
lib_path = debug_lib_dir
elif os.path.exists(release_lib_dir):
lib_path = release_lib_dir
src = os.path.join(lib_path, filename)
dst = os.path.join(os.path.realpath(self.build_lib), "onnx", filename)
self.copy_file(src, dst)
class mypy_type_check(ONNXCommand):
description = 'Run MyPy type checker'
def run(self):
"""Run command."""
onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py"))
returncode = subprocess.call([sys.executable, onnx_script])
sys.exit(returncode)
cmdclass = {
'create_version': create_version,
'cmake_build': cmake_build,
'build_py': build_py,
'develop': develop,
'build_ext': build_ext,
'typecheck': mypy_type_check,
}
################################################################################
# Extensions
################################################################################
ext_modules = [
setuptools.Extension(
name=str('onnx.onnx_cpp2py_export'),
sources=[])
]
################################################################################
# Packages
################################################################################
# no need to do fancy stuff so far
packages = setuptools.find_packages()
install_requires.extend([
'protobuf',
'numpy',
'six',
'typing>=3.6.4; python_version < "3.5"',
'typing-extensions>=3.6.2.1',
])
################################################################################
# Test
################################################################################
setup_requires.append('pytest-runner')
tests_require.append('pytest')
tests_require.append('nbval')
tests_require.append('tabulate')
if sys.version_info[0] == 3:
# Mypy doesn't work with Python 2
extras_require['mypy'] = ['mypy==0.600']
################################################################################
# Final
################################################################################
setuptools.setup(
name="onnx",
version=VersionInfo.version,
description="Open Neural Network Exchange",
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=packages,
include_package_data=True,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require=extras_require,
author='bddppq',
author_email='[email protected]',
url='https://github.com/onnx/onnx',
entry_points={
'console_scripts': [
'check-model = onnx.bin.checker:check_model',
'check-node = onnx.bin.checker:check_node',
'backend-test-tools = onnx.backend.test.cmd_tools:main',
]
},
)
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import tempfile
MYPY = False
if MYPY:
from typing import Text
def parse_args(): # type: () -> argparse.Namespace
parser = argparse.ArgumentParser(os.path.basename(__file__))
parser.add_argument('-r', '--root',
default=os.path.dirname(
os.path.dirname(os.path.abspath(__file__))),
help='onnx root directory (default: %(default)s)')
parser.add_argument('-o', '--out', required=True,
help='output directory')
return parser.parse_args()
def gen_trace_file(root_dir, out_path): # type: (Text, Text) -> None
subprocess.check_output([
'lcov',
'-c',
'-d',
root_dir,
'--no-external',
'--path',
root_dir,
'-o',
out_path])
subprocess.check_output([
'lcov',
'-r',
out_path,
os.path.join(root_dir, 'third_party', '*'),
'-o',
out_path])
subprocess.check_output([
'lcov',
'-r',
out_path,
os.path.join(root_dir, '.setuptools-cmake-build', '*'),
'-o',
out_path
])
def gen_html_files(root_dir, trace_path, out_dir): # type: (Text, Text, Text) -> None
subprocess.check_output([
'genhtml',
trace_path,
'-p',
root_dir,
'-o',
out_dir,
])
def main(): # type: () -> None
args = parse_args()
root = os.path.abspath(args.root)
out = os.path.abspath(args.out)
if not os.path.exists(out):
os.makedirs(out)
trace_path = os.path.join(out, 'onnx-coverage.info')
gen_trace_file(root, trace_path)
html_dir = os.path.join(out, 'html')
gen_html_files(root, trace_path, html_dir)
print('Static HTML files have been generated at:\n\t{}'.format(html_dir))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# Taken from https://github.com/dropbox/mypy-protobuf/blob/d984389124eae6dbbb517f766b9266bb32171510/python/protoc-gen-mypy
# (Apache 2.0 License)
# with own fixes to
# - appease flake8
# - exit without error when protobuf isn't installed
# - fix recognition of whether an identifier is defined locally
# (unfortunately, we use a python package name ONNX_NAMESPACE_FOO_BAR_FOR_CI
# on CI, which the original protoc-gen-mypy script recognized as camel case
# and therefore handled as an entry in the local package)
"""Protoc Plugin to generate mypy stubs. Loosely based on @zbarsky's go implementation"""
from __future__ import (
absolute_import,
division,
print_function,
)
import sys
from collections import defaultdict
from contextlib import contextmanager
try:
import google.protobuf.descriptor_pb2 as d_typed
import six
from google.protobuf.compiler import plugin_pb2 as plugin
except ImportError as e:
sys.stderr.write('Failed to generate mypy stubs: {}\n'.format(e))
sys.exit(0)
MYPY = False
if MYPY:
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Set,
Text,
cast,
)
else:
# Provide minimal mypy identifiers to make code run without typing module present
Text = None
def cast(type, value):
return value
# Hax to get around fact that google protobuf libraries aren't in typeshed yet
d = d_typed # type: Any
GENERATED = "@ge" + "nerated" # So phabricator doesn't think this file is generated
HEADER = "# {} by generate_proto_mypy_stubs.py. Do not edit!\n".format(GENERATED)
class PkgWriter(object):
"""Writes a single pyi file"""
def __init__(self, fd, descriptors):
# type: (d.FileDescriptorProto, Descriptors) -> None
self.fd = fd
self.descriptors = descriptors
self.lines = [] # type: List[Text]
self.indent = ""
# dictionary of x->y for `from {x} import {y}`
self.imports = defaultdict(set) # type: Dict[Text, Set[Text]]
self.locals = set() # type: Set[Text]
def _import(self, path, name):
# type: (Text, Text) -> Text
"""Imports a stdlib path and returns a handle to it
e.g. self._import("typing", "Optional") -> "Optional"
"""
imp = path.replace('/', '.')
self.imports[imp].add(name)
return name
def _import_message(self, type_name):
# type: (d.FieldDescriptorProto) -> Text
"""Import a referenced message and return a handle"""
name = cast(Text, type_name)
if name[0] == '.' and name[1].isupper() and name[2].islower():
# Message defined in this file
return name[1:]
message_fd = self.descriptors.message_to_fd[name]
if message_fd.name == self.fd.name:
# message defined in this package
split = name.split('.')
for i, segment in enumerate(split):
if segment and segment[0].isupper() and segment[1].islower():
return ".".join(split[i:])
# Not in package. Must import
split = name.split(".")
for i, segment in enumerate(split):
if segment and segment[0].isupper() and segment[1].islower():
assert message_fd.name.endswith('.proto')
import_name = self._import(message_fd.name[:-6].replace('-', '_') + "_pb2", segment)
remains = ".".join(split[i + 1:])
if not remains:
return import_name
raise AssertionError("Don't support nested imports yet")
# return new_nested_import(import_name, remains)
raise AssertionError("Could not parse local name " + name)
@contextmanager # type: ignore
def _indent(self):
# type: () -> Generator
self.indent = self.indent + " "
yield
self.indent = self.indent[:-4]
def _write_line(self, line, *args):
# type: (Text, *Text) -> None
self.lines.append(self.indent + line.format(*args))
def write_enums(self, enums):
# type: (List[d.EnumDescriptorProto]) -> None
line = self._write_line
for enum in enums:
line("class {}(int):", enum.name)
with self._indent():
line("@classmethod")
line("def Name(cls, number: int) -> str: ...")
line("@classmethod")
line("def Value(cls, name: str) -> int: ...")
line("@classmethod")
line("def keys(cls) -> {}[str]: ...",
self._import("typing", "List"))
line("@classmethod")
line("def values(cls) -> {}[int]: ...",
self._import("typing", "List"))
line("@classmethod")
line("def items(cls) -> {}[{}[str, int]]: ...",
self._import("typing", "List"),
self._import("typing", "Tuple"))
for val in enum.value:
line("{} = {}({}, {})", val.name, self._import("typing", "cast"), enum.name, val.number)
line("")
def write_messages(self, messages, prefix):
# type: (List[d.DescriptorProto], Text) -> None
line = self._write_line
message_class = self._import("google.protobuf.message", "Message")
for desc in messages:
self.locals.add(desc.name)
qualified_name = prefix + desc.name
line("class {}({}):", desc.name, message_class)
with self._indent():
# Nested enums/messages
self.write_enums(desc.enum_type)
self.write_messages(desc.nested_type, qualified_name + ".")
# Scalar fields
for field in [f for f in desc.field if is_scalar(f)]:
if field.label == d.FieldDescriptorProto.LABEL_REPEATED:
container = self._import("google.protobuf.internal.containers", "RepeatedScalarFieldContainer")
line("{} = ... # type: {}[{}]", field.name, container, self.python_type(field))
else:
line("{} = ... # type: {}", field.name, self.python_type(field))
line("")
# Getters for non-scalar fields
for field in [f for f in desc.field if not is_scalar(f)]:
line("@property")
if field.label == d.FieldDescriptorProto.LABEL_REPEATED:
msg = self.descriptors.messages[field.type_name]
if msg.options.map_entry:
# map generates a special Entry wrapper message
container = self._import("typing", "MutableMapping")
line("def {}(self) -> {}[{}, {}]: ...", field.name, container, self.python_type(msg.field[0]), self.python_type(msg.field[1]))
else:
container = self._import("google.protobuf.internal.containers", "RepeatedCompositeFieldContainer")
line("def {}(self) -> {}[{}]: ...", field.name, container, self.python_type(field))
else:
line("def {}(self) -> {}: ...", field.name, self.python_type(field))
line("")
# Constructor
line("def __init__(self,")
with self._indent():
# Required args
for field in [f for f in desc.field if f.label == d.FieldDescriptorProto.LABEL_REQUIRED]:
line("{} : {},", field.name, self.python_type(field))
for field in [f for f in desc.field if f.label != d.FieldDescriptorProto.LABEL_REQUIRED]:
if field.label == d.FieldDescriptorProto.LABEL_REPEATED:
if field.type_name != '' and self.descriptors.messages[field.type_name].options.map_entry:
msg = self.descriptors.messages[field.type_name]
line("{} : {}[{}[{}, {}]] = None,", field.name, self._import("typing", "Optional"),
self._import("typing", "Mapping"), self.python_type(msg.field[0]), self.python_type(msg.field[1]))
else:
line("{} : {}[{}[{}]] = None,", field.name, self._import("typing", "Optional"),
self._import("typing", "Iterable"), self.python_type(field))
else:
line("{} : {}[{}] = None,", field.name, self._import("typing", "Optional"),
self.python_type(field))
line(") -> None: ...")
# Standard message methods
line("@classmethod")
line("def FromString(cls, s: bytes) -> {}: ...", qualified_name)
line("def MergeFrom(self, other_msg: {}) -> None: ...", message_class)
line("def CopyFrom(self, other_msg: {}) -> None: ...", message_class)
line("")
def write_services(self, services):
# type: (List[d.ServiceDescriptorProto]) -> None
line = self._write_line
for service in services:
# The service definition interface
line("class {}({}, metaclass={}):", service.name, self._import("google.protobuf.service", "Service"), self._import("abc", "ABCMeta"))
with self._indent():
for method in service.method:
line("@{}", self._import("abc", "abstractmethod"))
line("def {}(self,", method.name)
with self._indent():
line("rpc_controller: {},", self._import("google.protobuf.service", "RpcController"))
line("request: {},", self._import_message(method.input_type))
line("done: {}[{}[[{}], None]],",
self._import("typing", "Optional"),
self._import("typing", "Callable"),
self._import_message(method.output_type))
line(") -> {}[{}]: ...", self._import("concurrent.futures", "Future"), self._import_message(method.output_type))
# The stub client
line("class {}({}):", service.name + "_Stub", service.name)
with self._indent():
line("def __init__(self, rpc_channel: {}) -> None: ...",
self._import("google.protobuf.service", "RpcChannel"))
def python_type(self, field):
# type: (d.FieldDescriptorProto) -> Text
mapping = {
d.FieldDescriptorProto.TYPE_DOUBLE: lambda: "float",
d.FieldDescriptorProto.TYPE_FLOAT: lambda: "float",
d.FieldDescriptorProto.TYPE_INT64: lambda: "int",
d.FieldDescriptorProto.TYPE_UINT64: lambda: "int",
d.FieldDescriptorProto.TYPE_FIXED64: lambda: "int",
d.FieldDescriptorProto.TYPE_SFIXED64: lambda: "int",
d.FieldDescriptorProto.TYPE_SINT64: lambda: "int",
d.FieldDescriptorProto.TYPE_INT32: lambda: "int",
d.FieldDescriptorProto.TYPE_UINT32: lambda: "int",
d.FieldDescriptorProto.TYPE_FIXED32: lambda: "int",
d.FieldDescriptorProto.TYPE_SFIXED32: lambda: "int",
d.FieldDescriptorProto.TYPE_SINT32: lambda: "int",
d.FieldDescriptorProto.TYPE_BOOL: lambda: "bool",
d.FieldDescriptorProto.TYPE_STRING: lambda: self._import("typing", "Text"),
d.FieldDescriptorProto.TYPE_BYTES: lambda: "bytes",
d.FieldDescriptorProto.TYPE_ENUM: lambda: self._import_message(field.type_name),
d.FieldDescriptorProto.TYPE_MESSAGE: lambda: self._import_message(field.type_name),
d.FieldDescriptorProto.TYPE_GROUP: lambda: self._import_message(field.type_name),
} # type: Dict[int, Callable[[], Text]]
assert field.type in mapping, "Unrecognized type: " + str(field.type)
return mapping[field.type]()
def write(self):
# type: () -> Text
imports = []
for pkg, items in six.iteritems(self.imports):
imports.append(u"from {} import (".format(pkg))
for item in sorted(items):
imports.append(u" {},".format(item))
imports.append(u")\n")
return "\n".join(imports + self.lines)
def is_scalar(fd):
# type: (d.FieldDescriptorProto) -> bool
return not (
fd.type == d.FieldDescriptorProto.TYPE_MESSAGE
or fd.type == d.FieldDescriptorProto.TYPE_GROUP
)
def generate_mypy_stubs(descriptors, response):
# type: (Descriptors, plugin.CodeGeneratorResponse) -> None
for name, fd in six.iteritems(descriptors.to_generate):
pkg_writer = PkgWriter(fd, descriptors)
pkg_writer.write_enums(fd.enum_type)
pkg_writer.write_messages(fd.message_type, "")
pkg_writer.write_services(fd.service)
assert name == fd.name
assert fd.name.endswith('.proto')
output = response.file.add()
output.name = fd.name[:-6].replace('-', '_') + '_pb2.pyi'
output.content = HEADER + pkg_writer.write()
print("Writing mypy to", output.name, file=sys.stderr)
class Descriptors(object):
def __init__(self, request):
# type: (plugin.CodeGeneratorRequest) -> None
files = {f.name: f for f in request.proto_file}
to_generate = {n: files[n] for n in request.file_to_generate}
self.files = files # type: Dict[Text, d.FileDescriptorProto]
self.to_generate = to_generate # type: Dict[Text, d.FileDescriptorProto]
self.messages = {} # type: Dict[Text, d.DescriptorProto]
self.message_to_fd = {} # type: Dict[Text, d.FileDescriptorProto]
def _add_enums(enums, prefix, fd):
# type: (List[d.EnumDescriptorProto], Text, d.FileDescriptorProto) -> None
for enum in enums:
self.message_to_fd[prefix + enum.name] = fd
def _add_messages(messages, prefix, fd):
# type: (Iterable[d.DescriptorProto], Text, d.FileDescriptorProto) -> None
for message in messages:
self.messages[prefix + message.name] = message
self.message_to_fd[prefix + message.name] = fd
sub_prefix = prefix + message.name + "."
_add_messages(message.nested_type, sub_prefix, fd)
_add_enums(message.enum_type, sub_prefix, fd)
for fd in request.proto_file:
start_prefix = "." + fd.package + "."
_add_messages(fd.message_type, start_prefix, fd)
_add_enums(fd.enum_type, start_prefix, fd)
def main():
# type: () -> None
# Read request message from stdin
if six.PY3:
data = sys.stdin.buffer.read()
else:
data = sys.stdin.read()
# Parse request
request = plugin.CodeGeneratorRequest()
request.ParseFromString(data)
# Create response
response = plugin.CodeGeneratorResponse()
# Generate mypy
generate_mypy_stubs(Descriptors(request), response)
# Serialise response message
output = response.SerializeToString()
# Write to stdout
if six.PY3:
sys.stdout.buffer.write(output)
else:
sys.stdout.write(output)
if __name__ == '__main__':
main()
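# --- Usage sketch (not part of the original script) ---
# As a protoc plugin, this script is normally invoked by protoc itself rather
# than run directly. A hypothetical invocation, assuming the script is on
# PATH under the name "protoc-gen-mypy" (the executable name is an
# assumption here):
#
#   protoc --plugin=protoc-gen-mypy=protoc-gen-mypy \
#          --mypy_out=output/dir \
#          example.proto
#
# protoc writes a serialized CodeGeneratorRequest to this script's stdin and
# turns the CodeGeneratorResponse on stdout into the generated .pyi stubs.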
|
#!/usr/bin/env python
import subprocess
import os
import sys
def main(): # type: () -> None
try:
root_folder = os.path.realpath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.chdir(root_folder)
subprocess.check_call(["mypy", "."])
subprocess.check_call(["mypy", "--py2", "."])
sys.exit(0)
except subprocess.CalledProcessError:
# Catch this exception because we don't want it to output a backtrace that would clutter the mypy output
sys.exit(1)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import io
import os
import re
import glob
import subprocess
from textwrap import dedent
autogen_header = """\
//
// WARNING: This file is automatically generated! Please edit onnx.in.proto.
//
"""
LITE_OPTION = '''
// For using protobuf-lite
option optimize_for = LITE_RUNTIME;
'''
DEFAULT_PACKAGE_NAME = "onnx"
IF_ONNX_ML_REGEX = re.compile(r'\s*//\s*#if\s+ONNX-ML\s*$')
ENDIF_ONNX_ML_REGEX = re.compile(r'\s*//\s*#endif\s*$')
ELSE_ONNX_ML_REGEX = re.compile(r'\s*//\s*#else\s*$')
MYPY = False
if MYPY:
from typing import Iterable, Text
def process_ifs(lines, onnx_ml): # type: (Iterable[Text], bool) -> Iterable[Text]
in_if = 0
for line in lines:
if IF_ONNX_ML_REGEX.match(line):
assert 0 == in_if
in_if = 1
elif ELSE_ONNX_ML_REGEX.match(line):
assert 1 == in_if
in_if = 2
elif ENDIF_ONNX_ML_REGEX.match(line):
assert (1 == in_if or 2 == in_if)
in_if = 0
else:
if 0 == in_if:
yield line
elif (1 == in_if and onnx_ml):
yield line
elif (2 == in_if and not onnx_ml):
yield line
IMPORT_REGEX = re.compile(r'(\s*)import\s*"([^"]*)\.proto";\s*$')
PACKAGE_NAME_REGEX = re.compile(r'\{PACKAGE_NAME\}')
ML_REGEX = re.compile(r'(.*)\-ml')
def process_package_name(lines, package_name): # type: (Iterable[Text], Text) -> Iterable[Text]
need_rename = (package_name != DEFAULT_PACKAGE_NAME)
for line in lines:
m = IMPORT_REGEX.match(line) if need_rename else None
if m:
include_name = m.group(2)
ml = ML_REGEX.match(include_name)
if ml:
include_name = "{}_{}-ml".format(ml.group(1), package_name)
else:
include_name = "{}_{}".format(include_name, package_name)
yield m.group(1) + 'import "{}.proto";'.format(include_name)
else:
yield PACKAGE_NAME_REGEX.sub(package_name, line)
PROTO_SYNTAX_REGEX = re.compile(r'(\s*)syntax\s*=\s*"proto2"\s*;\s*$')
OPTIONAL_REGEX = re.compile(r'(\s*)optional\s(.*)$')
def convert_to_proto3(lines): # type: (Iterable[Text]) -> Iterable[Text]
for line in lines:
# Set the syntax specifier
m = PROTO_SYNTAX_REGEX.match(line)
if m:
yield m.group(1) + 'syntax = "proto3";'
continue
# Remove optional keywords
m = OPTIONAL_REGEX.match(line)
if m:
yield m.group(1) + m.group(2)
continue
# Rewrite import
m = IMPORT_REGEX.match(line)
if m:
yield m.group(1) + 'import "{}.proto3";'.format(m.group(2))
continue
yield line
def gen_proto3_code(protoc_path, proto3_path, include_path, cpp_out, python_out): # type: (Text, Text, Text, Text, Text) -> None
print("Generate pb3 code using {}".format(protoc_path))
build_args = [protoc_path, proto3_path, '-I', include_path]
build_args.extend(['--cpp_out', cpp_out, '--python_out', python_out])
subprocess.check_call(build_args)
def translate(source, proto, onnx_ml, package_name): # type: (Text, int, bool, Text) -> Text
lines = source.splitlines() # type: Iterable[Text]
lines = process_ifs(lines, onnx_ml=onnx_ml)
lines = process_package_name(lines, package_name=package_name)
if proto == 3:
lines = convert_to_proto3(lines)
else:
assert proto == 2
return "\n".join(lines) # TODO: not Windows friendly
def qualify(f, pardir=os.path.realpath(os.path.dirname(__file__))): # type: (Text, Text) -> Text
return os.path.join(pardir, f)
def convert(stem, package_name, output, do_onnx_ml=False, lite=False, protoc_path=''): # type: (Text, Text, Text, bool, bool, Text) -> None
proto_in = qualify("{}.in.proto".format(stem))
need_rename = (package_name != DEFAULT_PACKAGE_NAME)
if do_onnx_ml:
proto_base = "{}_{}-ml".format(stem, package_name) if need_rename else "{}-ml".format(stem)
else:
proto_base = "{}_{}".format(stem, package_name) if need_rename else "{}".format(stem)
proto = qualify("{}.proto".format(proto_base), pardir=output)
proto3 = qualify("{}.proto3".format(proto_base), pardir=output)
print("Processing {}".format(proto_in))
with io.open(proto_in, 'r') as fin:
source = fin.read()
print("Writing {}".format(proto))
with io.open(proto, 'w', newline='') as fout:
fout.write(autogen_header)
fout.write(translate(source, proto=2, onnx_ml=do_onnx_ml, package_name=package_name))
if lite:
fout.write(LITE_OPTION)
print("Writing {}".format(proto3))
with io.open(proto3, 'w', newline='') as fout:
fout.write(autogen_header)
fout.write(translate(source, proto=3, onnx_ml=do_onnx_ml, package_name=package_name))
if lite:
fout.write(LITE_OPTION)
if protoc_path:
proto3_dir = os.path.dirname(proto3)
base_dir = os.path.dirname(proto3_dir)
gen_proto3_code(protoc_path, proto3, base_dir, base_dir, base_dir)
pb3_files = glob.glob(os.path.join(proto3_dir, '*.proto3.*'))
for pb3_file in pb3_files:
print("Removing {}".format(pb3_file))
os.remove(pb3_file)
if need_rename:
if do_onnx_ml:
proto_header = qualify("{}-ml.pb.h".format(stem), pardir=output)
else:
proto_header = qualify("{}.pb.h".format(stem), pardir=output)
print("Writing {}".format(proto_header))
with io.open(proto_header, 'w', newline='') as fout:
fout.write("#pragma once\n")
fout.write("#include \"{}.pb.h\"\n".format(proto_base))
# Generate py mapping
# "-" is invalid in python module name, replaces '-' with '_'
pb_py = qualify('{}_pb.py'.format(stem.replace('-', '_')), pardir=output)
if need_rename:
pb2_py = qualify('{}_pb2.py'.format(proto_base.replace('-', '_')), pardir=output)
else:
if do_onnx_ml:
pb2_py = qualify('{}_ml_pb2.py'.format(stem.replace('-', '_')), pardir=output)
else:
pb2_py = qualify('{}_pb2.py'.format(stem.replace('-', '_')), pardir=output)
print('Generating {}'.format(pb_py))
with open(pb_py, 'w') as f:
f.write(str(dedent('''\
# This file is generated by setup.py. DO NOT EDIT!
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .{} import * # noqa
'''.format(os.path.splitext(os.path.basename(pb2_py))[0]))))
def main(): # type: () -> None
parser = argparse.ArgumentParser(
description='Generates .proto file variations from .in.proto')
parser.add_argument('-p', '--package', default='onnx',
help='package name in the generated proto files'
' (default: %(default)s)')
parser.add_argument('-m', '--ml', action='store_true', help='ML mode')
parser.add_argument('-l', '--lite', action='store_true',
help='generate lite proto to use with protobuf-lite')
parser.add_argument('-o', '--output',
default=os.path.realpath(os.path.dirname(__file__)),
help='output directory (default: %(default)s)')
parser.add_argument('--protoc_path',
default='',
help='path to protoc for proto3 file validation')
parser.add_argument('stems', nargs='*', default=['onnx', 'onnx-operators'],
help='list of .in.proto file stems '
'(default: %(default)s)')
args = parser.parse_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
for stem in args.stems:
convert(stem,
package_name=args.package,
output=args.output,
do_onnx_ml=args.ml,
lite=args.lite,
protoc_path=args.protoc_path)
if __name__ == '__main__':
main()
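# --- Usage sketch (not part of the original script) ---
# A hypothetical command line, assuming the script above is saved as
# "gen_proto.py" (the file name is an assumption):
#
#   python gen_proto.py -p onnx -o build/protos onnx onnx-operators
#
# This reads onnx.in.proto and onnx-operators.in.proto from the script's
# directory and writes the proto2 (.proto) and proto3 (.proto3) variants
# into build/protos.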
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from .onnx_cpp2py_export import ONNX_ML
from onnx.external_data_helper import load_external_data_for_model, write_external_data_tensors
from .onnx_pb import * # noqa
from .onnx_operators_pb import * # noqa
from .version import version as __version__ # noqa
# Import common subpackages so they're available when you 'import onnx'
import onnx.helper # noqa
import onnx.checker # noqa
import onnx.defs # noqa
import google.protobuf.message
from typing import Union, Text, IO, Optional, cast, TypeVar, Any
from six import string_types
# f should be either readable or a file path
def _load_bytes(f): # type: (Union[IO[bytes], Text]) -> bytes
if hasattr(f, 'read') and callable(cast(IO[bytes], f).read):
s = cast(IO[bytes], f).read()
else:
with open(cast(Text, f), 'rb') as readable:
s = readable.read()
return s
# str should be bytes;
# f should be either writable or a file path
def _save_bytes(str, f): # type: (bytes, Union[IO[bytes], Text]) -> None
if hasattr(f, 'write') and callable(cast(IO[bytes], f).write):
cast(IO[bytes], f).write(str)
else:
with open(cast(Text, f), 'wb') as writable:
writable.write(str)
# f should be either a readable file or a file path
def _get_file_path(f): # type: (Union[IO[bytes], Text]) -> Optional[Text]
if isinstance(f, string_types):
return os.path.abspath(f)
if hasattr(f, 'name'):
return os.path.abspath(f.name)
return None
def _serialize(proto): # type: (Union[bytes, google.protobuf.message.Message]) -> bytes
'''
Serialize an in-memory proto to bytes.
@params
proto is an in-memory proto, such as a ModelProto, TensorProto, etc.
@return
Serialized proto in bytes
'''
if isinstance(proto, bytes):
return proto
elif hasattr(proto, 'SerializeToString') and callable(proto.SerializeToString):
result = proto.SerializeToString()
return result
else:
raise TypeError('No SerializeToString method is detected, and '
'proto is not bytes.\ntype is {}'.format(type(proto)))
_Proto = TypeVar('_Proto', bound=google.protobuf.message.Message)
def _deserialize(s, proto): # type: (bytes, _Proto) -> _Proto
'''
Parse bytes into an in-memory proto.
@params
s is bytes containing a serialized proto
proto is an in-memory proto object
@return
The proto instance filled in by s
'''
if not isinstance(s, bytes):
raise ValueError('Parameter s must be bytes, but got type: {}'.format(type(s)))
if not (hasattr(proto, 'ParseFromString') and callable(proto.ParseFromString)):
raise ValueError('No ParseFromString method is detected. '
'\ntype is {}'.format(type(proto)))
decoded = cast(Optional[int], proto.ParseFromString(s))
if decoded is not None and decoded != len(s):
raise google.protobuf.message.DecodeError(
"Protobuf decoding consumed too few bytes: {} out of {}".format(
decoded, len(s)))
return proto
def load_model(f, format=None, load_external_data=True): # type: (Union[IO[bytes], Text], Optional[Any], bool) -> ModelProto
'''
Loads a serialized ModelProto into memory
@params
f can be a file-like object (has "read" function) or a string containing a file name
format is for future use
@return
Loaded in-memory ModelProto
'''
s = _load_bytes(f)
model = load_model_from_string(s, format=format)
if load_external_data:
model_filepath = _get_file_path(f)
if model_filepath:
base_dir = os.path.dirname(model_filepath)
load_external_data_for_model(model, base_dir)
return model
def load_tensor(f, format=None): # type: (Union[IO[bytes], Text], Optional[Any]) -> TensorProto
'''
Loads a serialized TensorProto into memory
@params
f can be a file-like object (has "read" function) or a string containing a file name
format is for future use
@return
Loaded in-memory TensorProto
'''
s = _load_bytes(f)
return load_tensor_from_string(s, format=format)
def load_model_from_string(s, format=None): # type: (bytes, Optional[Any]) -> ModelProto
'''
Loads a binary string (bytes) that contains a serialized ModelProto
@params
s is a string that contains a serialized ModelProto
format is for future use
@return
Loaded in-memory ModelProto
'''
return _deserialize(s, ModelProto())
def load_tensor_from_string(s, format=None): # type: (bytes, Optional[Any]) -> TensorProto
'''
Loads a binary string (bytes) that contains a serialized TensorProto
@params
s is a string that contains a serialized TensorProto
format is for future use
@return
Loaded in-memory TensorProto
'''
return _deserialize(s, TensorProto())
def save_model(proto, f, format=None): # type: (Union[ModelProto, bytes], Union[IO[bytes], Text], Optional[Any]) -> None
'''
Saves the ModelProto to the specified path.
@params
proto should be an in-memory ModelProto
f can be a file-like object (has "write" function) or a string containing a file name
format is for future use
'''
if isinstance(proto, bytes):
proto = _deserialize(proto, ModelProto())
model_filepath = _get_file_path(f)
if model_filepath:
basepath = os.path.dirname(model_filepath)
proto = write_external_data_tensors(proto, basepath)
s = _serialize(proto)
_save_bytes(s, f)
def save_tensor(proto, f): # type: (TensorProto, Union[IO[bytes], Text]) -> None
'''
Saves the TensorProto to the specified path.
@params
proto should be an in-memory TensorProto
f can be a file-like object (has "write" function) or a string containing a file name
'''
s = _serialize(proto)
_save_bytes(s, f)
# For backward compatibility
load = load_model
load_from_string = load_model_from_string
save = save_model
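# --- Usage sketch (not part of the original module) ---
# A minimal round trip through the helpers above, assuming a model file named
# "model.onnx" exists; the file names are illustrative only.
def _example_load_save_roundtrip():  # type: () -> None
    model = load_model('model.onnx')           # parse a ModelProto from disk
    data = _serialize(model)                   # raw bytes of the same model
    same_model = load_model_from_string(data)  # parse the bytes back
    save_model(same_model, 'model_copy.onnx')  # write it out again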
|
"""onnx shape inference. Shape inference is not guaranteed to be
complete.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import onnx.onnx_cpp2py_export.shape_inference as C
from onnx import ModelProto
"""Apply shape inference to the provided ModelProto.
Inferred shapes are added to the value_info field of the graph.
If the inferred values conflict with values already provided in the
graph, that means that the provided values are invalid (or there is a
bug in shape inference), and the result is unspecified.
Arguments:
input (ModelProto,bool): ModelProto
Return:
return (ModelProto) model with inferred shape information
"""
def infer_shapes(model, check_type=False): # type: (ModelProto,bool) -> ModelProto
if not isinstance(model, ModelProto):
raise TypeError('Shape inference only accepts ModelProto, '
'incorrect type: {}'.format(type(model)))
model_str = model.SerializeToString()
inferred_model_str = C.infer_shapes(model_str, check_type)
return onnx.load_from_string(inferred_model_str)
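# --- Usage sketch (not part of the original module) ---
# Running shape inference on a loaded model; the file name is illustrative
# only.
def _example_infer_shapes():  # type: () -> None
    model = onnx.load('model.onnx')
    inferred = infer_shapes(model, check_type=True)
    # Inferred shapes land in the value_info field of the graph.
    print(len(inferred.graph.value_info))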
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx.checker
import onnx.helper
import onnx.optimizer
import onnx.shape_inference
from onnx import ModelProto
def polish_model(model): # type: (ModelProto) -> ModelProto
'''
This function combines several useful utility functions together.
'''
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
model = onnx.optimizer.optimize(model)
onnx.checker.check_model(model)
return model
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import platform
import numpy as np # type: ignore
from onnx import TensorProto
from onnx import mapping
from six import text_type, binary_type
from typing import Sequence, Any, Optional, Text, List
if platform.system() != 'AIX' and sys.byteorder != 'little':
raise RuntimeError(
'Numpy helper for tensor/ndarray is not available on big endian '
'systems yet.')
def combine_pairs_to_complex(fa): # type: (Sequence[int]) -> Sequence[np.complex64]
return [complex(fa[i * 2], fa[i * 2 + 1]) for i in range(len(fa) // 2)]
def to_array(tensor): # type: (TensorProto) -> np.ndarray[Any]
"""Converts a tensor def object to a numpy array.
Inputs:
tensor: a TensorProto object.
Returns:
arr: the converted array.
"""
if tensor.HasField("segment"):
raise ValueError(
"Currently not supporting loading segments.")
if tensor.data_type == TensorProto.UNDEFINED:
raise TypeError("The element type in the input tensor is not defined.")
tensor_dtype = tensor.data_type
np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype]
storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype]
storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type]
storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type]
dims = tensor.dims
if tensor.data_type == TensorProto.STRING:
utf8_strings = getattr(tensor, storage_field)
ss = list(s.decode('utf-8') for s in utf8_strings)
return np.asarray(ss).astype(np_dtype).reshape(dims)
if tensor.HasField("raw_data"):
# Raw_bytes support: using frombuffer.
return np.frombuffer(
tensor.raw_data,
dtype=np_dtype).reshape(dims)
else:
data = getattr(tensor, storage_field) # type: Sequence[np.complex64]
if (tensor_dtype == TensorProto.COMPLEX64
or tensor_dtype == TensorProto.COMPLEX128):
data = combine_pairs_to_complex(data)
return (
np.asarray(
data,
dtype=storage_np_dtype)
.astype(np_dtype)
.reshape(dims)
)
def from_array(arr, name=None): # type: (np.ndarray[Any], Optional[Text]) -> TensorProto
"""Converts a numpy array to a tensor def.
Inputs:
arr: a numpy array.
name: (optional) the name of the tensor.
Returns:
tensor_def: the converted tensor def.
"""
tensor = TensorProto()
tensor.dims.extend(arr.shape)
if name:
tensor.name = name
if arr.dtype == np.object:
# Special care for strings.
tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
# TODO: Introduce full string support.
# We flatten the array in case 2-D arrays are specified, and raise the
# error below for 3-D arrays or other unexpected objects.
# Unlike other types, where the shape is inferred automatically from
# nested arrays of values, the only reliable way to feed strings today
# is to put them into a flat array with .astype(np.object) (otherwise
# strings of different lengths may get different dtypes) and then
# restore the shape with .reshape([x, y, z]).
flat_array = arr.flatten()
for e in flat_array:
if isinstance(e, text_type):
tensor.string_data.append(e.encode('utf-8'))
elif isinstance(e, np.ndarray):
for s in e:
if isinstance(s, text_type):
tensor.string_data.append(s.encode('utf-8'))
else:
raise NotImplementedError(
"Unrecognized object in the object array; expected a string "
"or an array of strings, got: {}".format(type(e)))
return tensor
# For numerical types, directly use numpy raw bytes.
try:
dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
except KeyError:
raise RuntimeError(
"Numpy data type not understood yet: {}".format(str(arr.dtype)))
tensor.data_type = dtype
tensor.raw_data = arr.tobytes() # note: tobytes() requires numpy >= 1.9
return tensor
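# --- Usage sketch (not part of the original module) ---
# Round-tripping a numpy array through from_array/to_array with only the
# helpers defined above.
def _example_numpy_roundtrip():  # type: () -> None
    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    tensor = from_array(arr, name='example')  # raw_data holds arr.tobytes()
    back = to_array(tensor)                   # decoded via np.frombuffer
    assert np.array_equal(arr, back)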
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import numbers
from six import text_type, integer_types, binary_type
import google.protobuf.message
from onnx import TensorProto, SparseTensorProto, AttributeProto, ValueInfoProto, TensorShapeProto, \
NodeProto, ModelProto, GraphProto, OperatorSetIdProto, TypeProto, IR_VERSION
import onnx.defs as defs
from onnx import mapping
from onnx.mapping import STORAGE_TENSOR_TYPE_TO_FIELD
from typing import Text, Sequence, Any, Optional, Dict, Union, TypeVar, Callable, Tuple, List, cast
import numpy as np # type: ignore
def make_node(
op_type, # type: Text
inputs, # type: Sequence[Text]
outputs, # type: Sequence[Text]
name=None, # type: Optional[Text]
doc_string=None, # type: Optional[Text]
domain=None, # type: Optional[Text]
**kwargs # type: Any
): # type: (...) -> NodeProto
"""Construct a NodeProto.
Arguments:
op_type (string): The name of the operator to construct
inputs (list of string): list of input names
outputs (list of string): list of output names
name (string, default None): optional unique identifier for NodeProto
doc_string (string, default None): optional documentation string for NodeProto
domain (string, default None): optional domain for NodeProto.
If it's None, we will just use default domain (which is empty)
**kwargs (dict): the attributes of the node. The acceptable values
are documented in :func:`make_attribute`.
"""
node = NodeProto()
node.op_type = op_type
node.input.extend(inputs)
node.output.extend(outputs)
if name:
node.name = name
if doc_string:
node.doc_string = doc_string
if domain is not None:
node.domain = domain
if kwargs:
node.attribute.extend(
make_attribute(key, value)
for key, value in sorted(kwargs.items()))
return node
def make_operatorsetid(
domain, # type: Text
version, # type: int
): # type: (...) -> OperatorSetIdProto
"""Construct an OperatorSetIdProto.
Arguments:
domain (string): The domain of the operator set id
version (integer): Version of operator set id
"""
operatorsetid = OperatorSetIdProto()
operatorsetid.domain = domain
operatorsetid.version = version
return operatorsetid
def make_graph(
nodes, # type: Sequence[NodeProto]
name, # type: Text
inputs, # type: Sequence[ValueInfoProto]
outputs, # type: Sequence[ValueInfoProto]
initializer=None, # type: Optional[Sequence[TensorProto]]
doc_string=None, # type: Optional[Text]
value_info=None, # type: Optional[Sequence[ValueInfoProto]]
): # type: (...) -> GraphProto
if initializer is None:
initializer = []
if value_info is None:
value_info = []
graph = GraphProto()
graph.node.extend(nodes)
graph.name = name
graph.input.extend(inputs)
graph.output.extend(outputs)
graph.initializer.extend(initializer)
graph.value_info.extend(value_info)
if doc_string:
graph.doc_string = doc_string
return graph
def make_opsetid(domain, version): # type: (Text, int) -> OperatorSetIdProto
opsetid = OperatorSetIdProto()
opsetid.domain = domain
opsetid.version = version
return opsetid
def make_model(graph, **kwargs): # type: (GraphProto, **Any) -> ModelProto
model = ModelProto()
# Touch model.ir_version so it is stored as the version from which it is
# generated.
model.ir_version = IR_VERSION
model.graph.CopyFrom(graph)
opset_imports = None # type: Optional[Sequence[OperatorSetIdProto]]
opset_imports = kwargs.pop('opset_imports', None) # type: ignore
if opset_imports is not None:
model.opset_import.extend(opset_imports)
else:
# Default import
imp = model.opset_import.add()
imp.version = defs.onnx_opset_version()
for k, v in kwargs.items():
# TODO: Does this work with repeated fields?
setattr(model, k, v)
return model
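# --- Usage sketch (not part of the original module) ---
# Building a one-node model from the helpers in this module
# (make_tensor_value_info is defined further below).
def _example_make_model():  # type: () -> ModelProto
    x = make_tensor_value_info('X', TensorProto.FLOAT, [1, 2])
    y = make_tensor_value_info('Y', TensorProto.FLOAT, [1, 2])
    node = make_node('Relu', inputs=['X'], outputs=['Y'])
    graph = make_graph([node], 'example-graph', [x], [y])
    return make_model(graph, producer_name='example')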
def set_model_props(model, dict_value): # type: (ModelProto, Dict[Text, Text]) -> None
del model.metadata_props[:]
for (k, v) in dict_value.items():
entry = model.metadata_props.add()
entry.key = k
entry.value = v
# model.metadata_properties.append(entry)
def split_complex_to_pairs(ca): # type: (Sequence[np.complex64]) -> Sequence[int]
return [(ca[i // 2].real if (i % 2 == 0) else ca[i // 2].imag)
for i in range(len(ca) * 2)]
def make_tensor(
name, # type: Text
data_type, # type: int
dims, # type: Sequence[int]
vals, # type: Any
raw=False # type: bool
): # type: (...) -> TensorProto
'''
Make a TensorProto with specified arguments. If raw is False, this
function will choose the corresponding proto field to store the
values based on data_type. If raw is True, use "raw_data" proto
field to store the values, and values should be of type bytes in
this case.
'''
tensor = TensorProto()
tensor.data_type = data_type
tensor.name = name
if data_type == TensorProto.STRING:
assert not raw, "Can not use raw_data to store string type"
if (data_type == TensorProto.COMPLEX64
or data_type == TensorProto.COMPLEX128):
vals = split_complex_to_pairs(vals)
if raw:
tensor.raw_data = vals
else:
field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
getattr(tensor, field).extend(vals)
tensor.dims.extend(dims)
return tensor
def make_sparse_tensor(
values, # type: TensorProto
indices, # type: TensorProto
dims # type: Sequence[int]
): # type: (...) -> SparseTensorProto
sparse = SparseTensorProto()
sparse.values.CopyFrom(values)
sparse.indices.CopyFrom(indices)
sparse.dims.extend(dims)
return sparse
def _to_bytes_or_false(val): # type: (Union[Text, bytes]) -> Union[bytes, bool]
"""An internal graph to convert the input to a bytes or to False.
The criteria for conversion is as follows and should be python 2 and 3
compatible:
- If val is py2 str or py3 bytes: return bytes
- If val is py2 unicode or py3 str: return val.decode('utf-8')
- Otherwise, return False
"""
if isinstance(val, bytes):
return val
try:
return val.encode('utf-8')
except AttributeError:
return False
def make_attribute(
key, # type: Text
value, # type: Any
doc_string=None # type: Optional[Text]
): # type: (...) -> AttributeProto
"""Makes an AttributeProto based on the value type."""
attr = AttributeProto()
attr.name = key
if doc_string:
attr.doc_string = doc_string
is_iterable = isinstance(value, collections.Iterable)
bytes_or_false = _to_bytes_or_false(value)
# First, singular cases
# float
if isinstance(value, float):
attr.f = value
attr.type = AttributeProto.FLOAT
# integer
elif isinstance(value, numbers.Integral):
attr.i = cast(int, value)
attr.type = AttributeProto.INT
# string
elif bytes_or_false is not False:
assert isinstance(bytes_or_false, bytes)
attr.s = bytes_or_false
attr.type = AttributeProto.STRING
elif isinstance(value, TensorProto):
attr.t.CopyFrom(value)
attr.type = AttributeProto.TENSOR
elif isinstance(value, SparseTensorProto):
attr.sparse_tensor.CopyFrom(value)
attr.type = AttributeProto.SPARSE_TENSOR
elif isinstance(value, GraphProto):
attr.g.CopyFrom(value)
attr.type = AttributeProto.GRAPH
# Second, iterable cases
elif is_iterable:
byte_array = [_to_bytes_or_false(v) for v in value]
if all(isinstance(v, float) for v in value):
attr.floats.extend(value)
attr.type = AttributeProto.FLOATS
elif all(isinstance(v, numbers.Integral) for v in value):
# Turn np.int32/64 into Python built-in int.
attr.ints.extend(int(v) for v in value)
attr.type = AttributeProto.INTS
elif all(map(lambda bytes_or_false: bytes_or_false is not False, byte_array)):
attr.strings.extend(cast(List[bytes], byte_array))
attr.type = AttributeProto.STRINGS
elif all(isinstance(v, TensorProto) for v in value):
attr.tensors.extend(value)
attr.type = AttributeProto.TENSORS
elif all(isinstance(v, SparseTensorProto) for v in value):
attr.sparse_tensors.extend(value)
attr.type = AttributeProto.SPARSE_TENSORS
elif all(isinstance(v, GraphProto) for v in value):
attr.graphs.extend(value)
attr.type = AttributeProto.GRAPHS
else:
raise ValueError(
"You passed in an iterable attribute but I cannot figure out "
"its applicable type.")
else:
raise TypeError(
'value "{}" is not valid attribute data type.'.format(value))
return attr
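# --- Usage sketch (not part of the original module) ---
# make_attribute infers the AttributeProto type from the Python value:
def _example_make_attribute():  # type: () -> None
    assert make_attribute('alpha', 1.0).type == AttributeProto.FLOAT
    assert make_attribute('axis', 1).type == AttributeProto.INT
    assert make_attribute('mode', 'constant').type == AttributeProto.STRING
    assert make_attribute('pads', [0, 0, 1, 1]).type == AttributeProto.INTS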
def get_attribute_value(attr): # type: (AttributeProto) -> Any
if attr.type == AttributeProto.FLOAT:
return attr.f
if attr.type == AttributeProto.INT:
return attr.i
if attr.type == AttributeProto.STRING:
return attr.s
if attr.type == AttributeProto.TENSOR:
return attr.t
if attr.type == AttributeProto.GRAPH:
return attr.g
if attr.type == AttributeProto.FLOATS:
return list(attr.floats)
if attr.type == AttributeProto.INTS:
return list(attr.ints)
if attr.type == AttributeProto.STRINGS:
return list(attr.strings)
if attr.type == AttributeProto.TENSORS:
return list(attr.tensors)
if attr.type == AttributeProto.GRAPHS:
return list(attr.graphs)
raise ValueError("Unsupported ONNX attribute: {}".format(attr))
def make_empty_tensor_value_info(name): # type: (Text) -> ValueInfoProto
value_info_proto = ValueInfoProto()
value_info_proto.name = name
return value_info_proto
def make_tensor_value_info(
name, # type: Text
elem_type, # type: int
shape, # type: Optional[Sequence[Union[Text, int]]]
doc_string="", # type: Text
shape_denotation=None, # type: Optional[List[Text]]
): # type: (...) -> ValueInfoProto
"""Makes a ValueInfoProto based on the data type and shape."""
value_info_proto = ValueInfoProto()
value_info_proto.name = name
if doc_string:
value_info_proto.doc_string = doc_string
tensor_type_proto = value_info_proto.type.tensor_type
tensor_type_proto.elem_type = elem_type
tensor_shape_proto = tensor_type_proto.shape
if shape is not None:
# You might think this is a no-op (extending a normal Python
# list by [] certainly is), but protobuf lists work a little
# differently; if a field is never set, it is omitted from the
# resulting protobuf; a list that is explicitly set to be
# empty will get an (empty) entry in the protobuf. This
# difference is visible to our consumers, so make sure we emit
# an empty shape!
tensor_shape_proto.dim.extend([])
if shape_denotation:
if len(shape_denotation) != len(shape):
raise ValueError(
'Invalid shape_denotation. '
'Must be of the same length as shape.')
for i, d in enumerate(shape):
dim = tensor_shape_proto.dim.add()
if d is None:
pass
elif isinstance(d, integer_types):
dim.dim_value = d
elif isinstance(d, text_type):
dim.dim_param = d
else:
raise ValueError(
'Invalid item in shape: {}. '
'Needs to be an integer or a string.'.format(d))
if shape_denotation:
dim.denotation = shape_denotation[i]
return value_info_proto
def make_sequence_value_info(
name, # type: Text
elem_type, # type: int
shape, # type: Optional[Sequence[Union[Text, int]]]
doc_string="", # type: Text
elem_shape_denotation=None, # type: Optional[List[Text]]
): # type: (...) -> ValueInfoProto
"""Makes a ValueInfoProto based on the data type and shape for Sequence."""
value_info_proto = ValueInfoProto()
value_info_proto.name = name
if doc_string:
value_info_proto.doc_string = doc_string
sequence_type_proto = value_info_proto.type.sequence_type
sequence_type_proto.elem_type.tensor_type.elem_type = elem_type
tensor_value_info = make_tensor_value_info(name, elem_type, shape, doc_string, elem_shape_denotation)
if shape is not None:
sequence_type_proto.elem_type.tensor_type.shape.CopyFrom(tensor_value_info.type.tensor_type.shape)
return value_info_proto
def _sanitize_str(s): # type: (Union[Text, bytes]) -> Text
if isinstance(s, text_type):
sanitized = s
elif isinstance(s, binary_type):
sanitized = s.decode('utf-8', errors='ignore')
else:
sanitized = str(s)
if len(sanitized) < 64:
return sanitized
return sanitized[:64] + '...<+len=%d>' % (len(sanitized) - 64)
def printable_attribute(attr, subgraphs=False): # type: (AttributeProto, bool) -> Union[Text, Tuple[Text, List[GraphProto]]]
content = []
content.append(attr.name)
content.append("=")
def str_float(f): # type: (float) -> Text
# NB: Different Python versions print different numbers of trailing
# decimals, specifying this explicitly keeps it consistent for all
# versions
return '{:.15g}'.format(f)
def str_int(i): # type: (int) -> Text
# NB: In Python 2, longs will repr() as '2L', which is ugly and
# unnecessary. Explicitly format it to keep it consistent.
return '{:d}'.format(i)
def str_str(s): # type: (Text) -> Text
return repr(s)
_T = TypeVar('_T') # noqa
def str_list(str_elem, xs): # type: (Callable[[_T], Text], Sequence[_T]) -> Text
return '[' + ', '.join(map(str_elem, xs)) + ']'
# For now, this HasField-based dispatch should continue to work as long
# as we are running on a proto2 implementation; if/when we switch to
# proto3, we will need to use attr.type instead.
# To support printing subgraphs, if we find a graph attribute, print out
# its name here and pass the graph itself up to the caller for later
# printing.
graphs = []
if attr.HasField("f"):
content.append(str_float(attr.f))
elif attr.HasField("i"):
content.append(str_int(attr.i))
elif attr.HasField("s"):
# TODO: Bit nervous about Python 2 / Python 3 determinism implications
content.append(repr(_sanitize_str(attr.s)))
elif attr.HasField("t"):
if len(attr.t.dims) > 0:
content.append("<Tensor>")
else:
# special case to print scalars
field = STORAGE_TENSOR_TYPE_TO_FIELD[attr.t.data_type]
content.append('<Scalar Tensor {}>'.format(str(getattr(attr.t, field))))
elif attr.HasField("g"):
content.append("<graph {}>".format(attr.g.name))
graphs.append(attr.g)
elif attr.floats:
content.append(str_list(str_float, attr.floats))
elif attr.ints:
content.append(str_list(str_int, attr.ints))
elif attr.strings:
# TODO: Bit nervous about Python 2 / Python 3 determinism implications
content.append(str(list(map(_sanitize_str, attr.strings))))
elif attr.tensors:
content.append("[<Tensor>, ...]")
elif attr.graphs:
content.append('[')
for i, g in enumerate(attr.graphs):
comma = ',' if i != len(attr.graphs) - 1 else ''
content.append('<graph {}>{}'.format(g.name, comma))
content.append(']')
graphs.extend(attr.graphs)
else:
content.append("<Unknown>")
if subgraphs:
return ' '.join(content), graphs
else:
return ' '.join(content)
def printable_dim(dim): # type: (TensorShapeProto.Dimension) -> Text
which = dim.WhichOneof('value')
assert which is not None
return str(getattr(dim, which))
def printable_type(t): # type: (TypeProto) -> Text
if t.WhichOneof('value') == "tensor_type":
s = TensorProto.DataType.Name(t.tensor_type.elem_type)
if t.tensor_type.HasField('shape'):
if len(t.tensor_type.shape.dim):
s += str(', ' + 'x'.join(map(printable_dim, t.tensor_type.shape.dim)))
else:
s += str(', scalar')
return s
if t.WhichOneof('value') is None:
return ""
return 'Unknown type {}'.format(t.WhichOneof('value'))
def printable_value_info(v): # type: (ValueInfoProto) -> Text
s = '%{}'.format(v.name)
if v.type:
s = '{}[{}]'.format(s, printable_type(v.type))
return s
def printable_tensor_proto(t): # type: (TensorProto) -> Text
s = '%{}['.format(t.name)
s += TensorProto.DataType.Name(t.data_type)
if t.dims is not None:
if len(t.dims):
s += str(', ' + 'x'.join(map(str, t.dims)))
else:
s += str(', scalar')
s += ']'
return s
def printable_node(node, prefix='', subgraphs=False): # type: (NodeProto, Text, bool) -> Union[Text, Tuple[Text, List[GraphProto]]]
content = []
if len(node.output):
content.append(
', '.join(['%{}'.format(name) for name in node.output]))
content.append('=')
# To deal with nested graphs
graphs = [] # type: List[GraphProto]
printed_attrs = []
for attr in node.attribute:
if subgraphs:
printed_attr, gs = printable_attribute(attr, subgraphs)
assert isinstance(gs, list)
graphs.extend(gs)
printed_attrs.append(printed_attr)
else:
printed = printable_attribute(attr)
assert isinstance(printed, Text)
printed_attrs.append(printed)
printed_attributes = ', '.join(sorted(printed_attrs))
printed_inputs = ', '.join(['%{}'.format(name) for name in node.input])
if node.attribute:
content.append("{}[{}]({})".format(node.op_type, printed_attributes, printed_inputs))
else:
content.append("{}({})".format(node.op_type, printed_inputs))
if subgraphs:
return prefix + ' '.join(content), graphs
else:
return prefix + ' '.join(content)
def printable_graph(graph, prefix=''): # type: (GraphProto, Text) -> Text
content = []
indent = prefix + ' '
# header
header = ['graph', graph.name]
initializers = {t.name for t in graph.initializer}
if len(graph.input):
header.append("(")
in_strs = [] # required inputs
in_with_init_strs = [] # optional inputs with initializer providing default value
for inp in graph.input:
if inp.name not in initializers:
in_strs.append(printable_value_info(inp))
else:
in_with_init_strs.append(printable_value_info(inp))
if in_strs:
content.append(prefix + ' '.join(header))
header = []
for line in in_strs:
content.append(prefix + ' ' + line)
header.append(")")
if in_with_init_strs:
header.append("optional inputs with matching initializers (")
content.append(prefix + ' '.join(header))
header = []
for line in in_with_init_strs:
content.append(prefix + ' ' + line)
header.append(")")
# from IR 4 onwards an initializer is not required to have a matching graph input
# so output the name, type and shape of those as well
if len(in_with_init_strs) < len(initializers):
graph_inputs = {i.name for i in graph.input}
init_strs = [printable_tensor_proto(i) for i in graph.initializer
if i.name not in graph_inputs]
header.append("initializers (")
content.append(prefix + ' '.join(header))
header = []
for line in init_strs:
content.append(prefix + ' ' + line)
header.append(")")
header.append('{')
content.append(prefix + ' '.join(header))
graphs = [] # type: List[GraphProto]
# body
for node in graph.node:
pn, gs = printable_node(node, indent, subgraphs=True)
assert isinstance(gs, list)
content.append(pn)
graphs.extend(gs)
# tail
tail = ['return']
if len(graph.output):
tail.append(
', '.join(['%{}'.format(out.name) for out in graph.output]))
content.append(indent + ' '.join(tail))
# closing bracket
content.append(prefix + '}')
for g in graphs:
content.append('\n' + printable_graph(g))
return '\n'.join(content)
def strip_doc_string(proto): # type: (google.protobuf.message.Message) -> None
"""
Empties `doc_string` field on any nested protobuf messages
"""
assert isinstance(proto, google.protobuf.message.Message)
for descriptor in proto.DESCRIPTOR.fields:
if descriptor.name == 'doc_string':
proto.ClearField(descriptor.name)
elif descriptor.type == descriptor.TYPE_MESSAGE:
if descriptor.label == descriptor.LABEL_REPEATED:
for x in getattr(proto, descriptor.name):
strip_doc_string(x)
elif proto.HasField(descriptor.name):
strip_doc_string(getattr(proto, descriptor.name))
|
# ATTENTION: The code in this file is highly EXPERIMENTAL.
# Adventurous users should note that the APIs will probably change.
"""onnx optimizer
This enables users to optimize their models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import onnx.onnx_cpp2py_export.optimizer as C
from onnx import ModelProto
from typing import Text, Sequence, Optional
"""Apply the optimization on the serialized ModelProto.
Arguments:
input (ModelProto): model
names (list of string): list of optimization names
Return:
return (ModelProto) optimized model
Supported pass names:
-- nop
-- eliminate_identity
-- eliminate_nop_transpose
-- eliminate_nop_pad
-- eliminate_unused_initializer
-- fuse_consecutive_squeezes
-- fuse_consecutive_transposes
-- fuse_add_bias_into_conv
-- fuse_transpose_into_gemm
"""
get_available_passes = C.get_available_passes
def optimize(model, passes=None, fixed_point=False): # type: (ModelProto, Optional[Sequence[Text]], bool) -> ModelProto
if passes is None:
passes = ['eliminate_nop_transpose',
'eliminate_nop_pad',
'fuse_consecutive_transposes',
'fuse_transpose_into_gemm']
if not isinstance(model, ModelProto):
raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))
model_str = model.SerializeToString()
if fixed_point:
optimized_model_str = C.optimize_fixedpoint(model_str, passes)
else:
optimized_model_str = C.optimize(model_str, passes)
return onnx.load_from_string(optimized_model_str)
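# --- Usage sketch (not part of the original module) ---
# Running a single named pass over a loaded model; the file name is
# illustrative only.
def _example_optimize():  # type: () -> ModelProto
    model = onnx.load('model.onnx')
    return optimize(model, passes=['eliminate_identity'])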
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import uuid
import os
from itertools import chain
from typing import Iterable, Text, Optional
from .onnx_pb import TensorProto, ModelProto
class ExternalDataInfo(object):
def __init__(self, tensor): # type: (TensorProto) -> None
self.location = ''
self.offset = None
self.length = None
self.checksum = None
self.basepath = ''
for entry in tensor.external_data:
setattr(self, entry.key, entry.value)
if self.offset:
self.offset = int(self.offset)
if self.length:
self.length = int(self.length)
def load_external_data_for_tensor(tensor, base_dir): # type: (TensorProto, Text) -> None
"""
Load data from an external file for tensor.
@params
tensor: a TensorProto object.
base_dir: directory that contains the external data.
"""
if tensor.HasField("raw_data"): # already loaded
return
info = ExternalDataInfo(tensor)
file_location = _sanitize_path(info.location)
external_data_file_path = os.path.join(base_dir, file_location)
with open(external_data_file_path, 'rb') as data_file:
if info.offset:
data_file.seek(info.offset)
if info.length:
tensor.raw_data = data_file.read(info.length)
else:
tensor.raw_data = data_file.read()
def load_external_data_for_model(model, base_dir): # type: (ModelProto, Text) -> None
"""
Loads external tensors into model
@params
model: ModelProto to load external data to
base_dir: directory that contains external data
"""
for tensor in _get_all_tensors(model):
if uses_external_data(tensor):
load_external_data_for_tensor(tensor, base_dir)
def set_external_data(tensor, # type: TensorProto
location, # type: Text
offset=None, # type: Optional[int]
length=None, # type: Optional[int]
checksum=None, # type: Optional[Text]
basepath=None # type: Optional[Text]
): # type: (...) -> None
del tensor.external_data[:]
tensor.data_location = TensorProto.EXTERNAL
for (k, v) in {
'location': location,
'offset': int(offset) if offset is not None else None,
'length': int(length) if length is not None else None,
'checksum': checksum,
'basepath': basepath
}.items():
if v is not None:
entry = tensor.external_data.add()
entry.key = k
entry.value = str(v)
def convert_model_to_external_data(model, all_tensors_to_one_file=True, location=None):
# type: (ModelProto, bool, Optional[Text]) -> None
"""
Call to set all tensors as external data. save_model saves all the tensor data as external data after calling this function.
@params
model: ModelProto to be converted.
all_tensors_to_one_file: If true, save all tensors to one external file specified by location.
If false, save each tensor to a file named with the tensor name.
location: specify the external file that all tensors to save to.
If not specified, will use the model name.
"""
if all_tensors_to_one_file:
file_name = Text(uuid.uuid1())
if location:
file_name = location
for tensor in _get_all_tensors(model):
set_external_data(tensor, file_name)
else:
for tensor in _get_all_tensors(model):
set_external_data(tensor, tensor.name)
def convert_model_from_external_data(model): # type: (ModelProto) -> None
"""
Call to set all tensor data as embedded data. save_model saves all the tensor data as embedded data after calling this function.
@params
model: ModelProto to be converted.
"""
for tensor in _get_all_tensors(model):
if uses_external_data(tensor):
if not tensor.HasField("raw_data"):
raise ValueError("raw_data field doesn't exist.")
del tensor.external_data[:]
tensor.data_location = TensorProto.DEFAULT
def save_external_data(tensor, base_path): # type: (TensorProto, Text) -> None
"""
Write tensor data to an external file according to information in the `external_data` field.
@params
tensor: Tensor object to be serialized
base_path: System path of a folder where tensor data is to be stored
"""
info = ExternalDataInfo(tensor)
external_data_file_path = os.path.join(base_path, info.location)
# Retrieve the tensor's data from raw_data or load external file
if not tensor.HasField("raw_data"):
raise ValueError("raw_data field doesn't exist.")
# Create file if it doesn't exist
if not os.path.isfile(external_data_file_path):
open(external_data_file_path, 'ab').close()
# Open file for reading and writing at random locations ('r+b')
with open(external_data_file_path, 'r+b') as data_file:
data_file.seek(0, 2)
if info.offset is not None:
# Pad file to required offset if needed
file_size = data_file.tell()
if info.offset > file_size:
data_file.write(b"\0" * (info.offset - file_size))
data_file.seek(info.offset)
offset = data_file.tell()
data_file.write(tensor.raw_data)
set_external_data(tensor, info.location, offset, data_file.tell() - offset)
def _get_all_tensors(onnx_model_proto): # type: (ModelProto) -> Iterable[TensorProto]
"""Scan an ONNX model for all tensors and return as an iterator."""
return chain(_get_initializer_tensors(onnx_model_proto),
_get_attribute_tensors(onnx_model_proto))
def _get_initializer_tensors(onnx_model_proto): # type: (ModelProto) -> Iterable[TensorProto]
"""Create an iterator of initializer tensors from ONNX model."""
for initializer in onnx_model_proto.graph.initializer:
yield initializer
def _get_attribute_tensors(onnx_model_proto): # type: (ModelProto) -> Iterable[TensorProto]
"""Create an iterator of tensors from node attributes of an ONNX model."""
for node in onnx_model_proto.graph.node:
for attribute in node.attribute:
if attribute.HasField("t"):
yield attribute.t
for tensor in attribute.tensors:
yield tensor
def _sanitize_path(path): # type: (Text) -> Text
"""Remove path components which would allow traversing up a directory tree from a base path.
Note: This method is currently very basic and should be expanded.
"""
return path.lstrip('/.')
def uses_external_data(tensor): # type: (TensorProto) -> bool
"""Return true if the tensor stores data in an external location."""
return tensor.HasField("data_location") and tensor.data_location == TensorProto.EXTERNAL
def remove_external_data_field(tensor, field_key): # type: (TensorProto, Text) -> None
"""
Remove a field from a Tensor's external_data key-value store.
Modifies tensor object in place.
@params
tensor: Tensor object from which value will be removed
field_key: The key of the field to be removed
"""
for (i, field) in enumerate(tensor.external_data):
if field.key == field_key:
del tensor.external_data[i]
def write_external_data_tensors(model, filepath): # type: (ModelProto, Text) -> ModelProto
"""
Write external data of all tensors to files on disk.
Note: This function also strips basepath information from all tensors' external_data fields.
@params
model: Model object which is the source of tensors to serialize.
filepath: System path to the directory which should be treated as base path for external data.
@return
The modified model object.
"""
for tensor in _get_all_tensors(model):
if uses_external_data(tensor):
save_external_data(tensor, filepath)
tensor.ClearField(str('raw_data'))
return model
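# --- Usage sketch (not part of the original module) ---
# Marking a model's tensors as externally stored and reloading their data;
# the location name is illustrative only.
def _example_external_data(model):  # type: (ModelProto) -> None
    # Only metadata is set here; the bytes are written out later by
    # write_external_data_tensors() (called via onnx.save_model).
    convert_model_to_external_data(model, all_tensors_to_one_file=True,
                                   location='weights.bin')
    # After the model has been saved and re-loaded without raw data, the
    # bytes can be pulled back in from the directory holding weights.bin:
    load_external_data_for_model(model, base_dir='.')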
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import TensorProto
from typing import Text, Any
import numpy as np # type: ignore
TENSOR_TYPE_TO_NP_TYPE = {
int(TensorProto.FLOAT): np.dtype('float32'),
int(TensorProto.UINT8): np.dtype('uint8'),
int(TensorProto.INT8): np.dtype('int8'),
int(TensorProto.UINT16): np.dtype('uint16'),
int(TensorProto.INT16): np.dtype('int16'),
int(TensorProto.INT32): np.dtype('int32'),
int(TensorProto.INT64): np.dtype('int64'),
int(TensorProto.BOOL): np.dtype('bool'),
int(TensorProto.FLOAT16): np.dtype('float16'),
int(TensorProto.DOUBLE): np.dtype('float64'),
int(TensorProto.COMPLEX64): np.dtype('complex64'),
int(TensorProto.COMPLEX128): np.dtype('complex128'),
int(TensorProto.UINT32): np.dtype('uint32'),
int(TensorProto.UINT64): np.dtype('uint64'),
int(TensorProto.STRING): np.dtype(np.object)
}
NP_TYPE_TO_TENSOR_TYPE = {v: k for k, v in TENSOR_TYPE_TO_NP_TYPE.items()}
TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE = {
int(TensorProto.FLOAT): int(TensorProto.FLOAT),
int(TensorProto.UINT8): int(TensorProto.INT32),
int(TensorProto.INT8): int(TensorProto.INT32),
int(TensorProto.UINT16): int(TensorProto.INT32),
int(TensorProto.INT16): int(TensorProto.INT32),
int(TensorProto.INT32): int(TensorProto.INT32),
int(TensorProto.INT64): int(TensorProto.INT64),
int(TensorProto.BOOL): int(TensorProto.INT32),
int(TensorProto.FLOAT16): int(TensorProto.UINT16),
int(TensorProto.BFLOAT16): int(TensorProto.UINT16),
int(TensorProto.DOUBLE): int(TensorProto.DOUBLE),
int(TensorProto.COMPLEX64): int(TensorProto.FLOAT),
int(TensorProto.COMPLEX128): int(TensorProto.DOUBLE),
int(TensorProto.UINT32): int(TensorProto.UINT32),
int(TensorProto.UINT64): int(TensorProto.UINT64),
int(TensorProto.STRING): int(TensorProto.STRING),
}
STORAGE_TENSOR_TYPE_TO_FIELD = {
int(TensorProto.FLOAT): 'float_data',
int(TensorProto.INT32): 'int32_data',
int(TensorProto.INT64): 'int64_data',
int(TensorProto.UINT16): 'int32_data',
int(TensorProto.DOUBLE): 'double_data',
int(TensorProto.COMPLEX64): 'float_data',
int(TensorProto.COMPLEX128): 'double_data',
int(TensorProto.UINT32): 'uint64_data',
int(TensorProto.UINT64): 'uint64_data',
int(TensorProto.STRING): 'string_data',
int(TensorProto.BOOL): 'int32_data',
}
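# --- Usage sketch (not part of the original module) ---
# The tables above translate between TensorProto element types, numpy dtypes,
# and the proto fields used for storage, e.g.:
def _example_mapping():  # type: () -> None
    assert TENSOR_TYPE_TO_NP_TYPE[int(TensorProto.FLOAT)] == np.dtype('float32')
    assert NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')] == int(TensorProto.INT64)
    assert STORAGE_TENSOR_TYPE_TO_FIELD[int(TensorProto.FLOAT)] == 'float_data'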
|
"""onnx version converter
This enables users to convert their models between different opsets within the
default domain ("" or "ai.onnx").
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import onnx.onnx_cpp2py_export.version_converter as C
from onnx import ModelProto
from typing import Text, Sequence
"""Apply the version conversion on the serialized ModelProto.
Arguments:
input (ModelProto): model
target_version (int): target opset version
Return:
return (ModelProto) converted model
Raises Exceptions:
RuntimeError when some necessary conversion is not supported
Supported adapters:
--Add from Opset 7 to Opset 6
--Add from Opset 6 to Opset 5
--Add from Opset 6 to Opset 7
--Add from Opset 5 to Opset 6
--Mul from Opset 6 to Opset 7
--Mul from Opset 7 to Opset 6
--Mul from Opset 6 to Opset 5
--Mul from Opset 5 to Opset 6
--Gemm from Opset 7 to Opset 6
--Gemm from Opset 6 to Opset 5
--Gemm from Opset 6 to Opset 7
--Gemm from Opset 5 to Opset 6
--Relu from Opset 6 to Opset 5
--Relu from Opset 5 to Opset 6
--BatchNorm from Opset 7 to Opset 6
--BatchNorm from Opset 6 to Opset 7
--BatchNorm from Opset 6 to Opset 5
--BatchNorm from Opset 5 to Opset 6
--Concat from Opset 4 to Opset 3
--Concat from Opset 3 to Opset 4
--Reshape from Opset 5 to Opset 4
--Reshape from Opset 4 to Opset 5
--Sum from Opset 7 to Opset 8
--Sum from Opset 8 to Opset 7
--Sum from Opset 6 to Opset 5
--Sum from Opset 5 to Opset 6
--MaxPool from Opset 8 to Opset 7
--MaxPool from Opset 7 to Opset 8
--AveragePool from Opset 7 to Opset 6
--AveragePool from Opset 6 to Opset 7
--Dropout from Opset 7 to Opset 6
--Dropout from Opset 6 to Opset 5
--Dropout from Opset 6 to Opset 7
--Dropout from Opset 5 to Opset 6
Unsupported adapters:
--Min from Opset 8 to Opset 7
--Min from Opset 7 to Opset 8
--Min from Opset 6 to Opset 5
--Min from Opset 5 to Opset 6
--Mean from Opset 8 to Opset 7
--Mean from Opset 7 to Opset 8
--Mean from Opset 6 to Opset 5
--Mean from Opset 5 to Opset 6
--Max from Opset 8 to Opset 7
--Max from Opset 7 to Opset 8
--Max from Opset 6 to Opset 5
--Max from Opset 5 to Opset 6
--Xor from Opset 6 to Opset 7
--Xor from Opset 7 to Opset 6
--Upsample from Opset 6 to Opset 7
--Upsample from Opset 7 to Opset 6
--Sub from Opset 6 to Opset 7
--Sub from Opset 7 to Opset 6
--Sub from Opset 6 to Opset 5
--Sub from Opset 5 to Opset 6
--RNN from Opset 6 to Opset 7
--RNN from Opset 7 to Opset 6
--Pow from Opset 6 to Opset 7
--Pow from Opset 7 to Opset 6
--PRelu from Opset 6 to Opset 7
--PRelu from Opset 7 to Opset 6
--PRelu from Opset 6 to Opset 5
--PRelu from Opset 5 to Opset 6
--Or from Opset 6 to Opset 7
--Or from Opset 7 to Opset 6
--Less from Opset 6 to Opset 7
--Less from Opset 7 to Opset 6
--LSTM from Opset 6 to Opset 7
--LSTM from Opset 7 to Opset 6
--Greater from Opset 6 to Opset 7
--Greater from Opset 7 to Opset 6
--GRU from Opset 6 to Opset 7
--GRU from Opset 7 to Opset 6
--GRU from Opset 3 to Opset 2
--GRU from Opset 2 to Opset 3
--Equal from Opset 6 to Opset 7
--Equal from Opset 7 to Opset 6
--Div from Opset 6 to Opset 7
--Div from Opset 7 to Opset 6
--Div from Opset 6 to Opset 5
--Div from Opset 5 to Opset 6
--And from Opset 6 to Opset 7
--And from Opset 7 to Opset 6
--And from Opset 6 to Opset 5
--And from Opset 5 to Opset 6
--Tile from Opset 6 to Opset 5
--Tile from Opset 5 to Opset 6
--Sqrt from Opset 6 to Opset 5
--Sqrt from Opset 5 to Opset 6
--Sigmoid from opset 6 to opset 5
--Sigmoid from opset 5 to opset 6
--Selu from opset 6 to opset 5
--Selu from opset 5 to opset 6
--Reciprocal from opset 6 to opset 5
--Reciprocal from opset 5 to opset 6
--Neg from opset 6 to opset 5
--Neg from opset 5 to opset 6
--Log from opset 6 to opset 5
--Log from opset 5 to opset 6
--LeakyRelu from opset 6 to opset 5
--LeakyRelu from opset 5 to opset 6
--InstanceNormalization from opset 6 to opset 5
--InstanceNormalization from opset 5 to opset 6
--HardSigmoid from opset 6 to opset 5
--HardSigmoid from opset 5 to opset 6
--Floor from opset 6 to opset 5
--Floor from opset 5 to opset 6
--Exp from opset 6 to opset 5
--Exp from opset 5 to opset 6
--Elu from opset 6 to opset 5
--Elu from opset 5 to opset 6
--Clip from opset 6 to opset 5
--Clip from opset 5 to opset 6
--Ceil from opset 6 to opset 5
--Ceil from opset 5 to opset 6
--Cast from opset 6 to opset 5
--Cast from opset 5 to opset 6
--Abs from opset 6 to opset 5
--Abs from opset 5 to opset 6
--Split from opset 2 to opset 1
--Split from opset 1 to opset 2
--Pad from opset 2 to opset 1
--Pad from opset 1 to opset 2
--LpPool from opset 2 to opset 1
--LpPool from opset 1 to opset 2
--GlobalLpPool from opset 2 to opset 1
--GlobalLpPool from opset 1 to opset 2
"""
def convert_version(model, target_version): # type: (ModelProto, int) -> ModelProto
if not isinstance(model, ModelProto):
raise ValueError('VersionConverter only accepts ModelProto as model, incorrect type: {}'.format(type(model)))
if not isinstance(target_version, int):
raise ValueError('VersionConverter only accepts int as target_version, incorrect type: {}'.format(type(target_version)))
model_str = model.SerializeToString()
converted_model_str = C.convert_version(model_str, target_version)
return onnx.load_from_string(converted_model_str)
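# --- Usage sketch (not part of the original module) ---
# Converting a loaded model to another default-domain opset; the file name
# and target opset are illustrative only.
def _example_convert_version():  # type: () -> ModelProto
    model = onnx.load('model.onnx')
    return convert_version(model, target_version=8)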
|
"""onnx checker
This implements graphalities that allows us to check whether a serialized
proto is legal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from onnx import (ValueInfoProto,
AttributeProto,
TensorProto,
SparseTensorProto,
NodeProto,
ModelProto,
GraphProto,
IR_VERSION)
import onnx.onnx_cpp2py_export.checker as C
import onnx.defs
from google.protobuf.message import Message
from typing import TypeVar, Callable, Any, Type, cast, Union, Text
from six import string_types
import onnx.shape_inference
# TODO: This thing where we reserialize the protobuf back into the
# string, only to deserialize it at the call site, is really goofy.
# Stop doing that.
# NB: Please don't edit this context!
DEFAULT_CONTEXT = C.CheckerContext()
DEFAULT_CONTEXT.ir_version = IR_VERSION
# TODO: Maybe ONNX-ML should also be defaulted?
DEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}
FuncType = TypeVar('FuncType', bound=Callable[..., Any])
# TODO: This really doesn't seem worth the metaprogramming...
def _create_checker(proto_type): # type: (Type[Message]) -> Callable[[FuncType], FuncType]
def decorator(py_func): # type: (FuncType) -> FuncType
@functools.wraps(py_func)
def checker(proto, ctx=DEFAULT_CONTEXT): # type: (Message, C.CheckerContext) -> Any
if not isinstance(proto, proto_type):
raise RuntimeError(
'You cannot pass an object that is not of type {}'.format(
proto_type.__name__))
return getattr(C, py_func.__name__)(
proto.SerializeToString(), ctx)
return cast(FuncType, checker)
return decorator
@_create_checker(ValueInfoProto)
def check_value_info(value_info, ctx=DEFAULT_CONTEXT): # type: (ValueInfoProto, C.CheckerContext) -> None
pass
@_create_checker(TensorProto)
def check_tensor(tensor, ctx=DEFAULT_CONTEXT): # type: (TensorProto, C.CheckerContext) -> None
pass
@_create_checker(AttributeProto)
def check_attribute(attr, ctx=DEFAULT_CONTEXT): # type: (AttributeProto, C.CheckerContext) -> None
pass
@_create_checker(NodeProto)
def check_node(node, ctx=DEFAULT_CONTEXT): # type: (NodeProto, C.CheckerContext) -> None
pass
@_create_checker(GraphProto)
def check_graph(graph, ctx=DEFAULT_CONTEXT): # type: (GraphProto, C.CheckerContext) -> None
pass
def check_sparse_tensor(sparse, ctx=DEFAULT_CONTEXT): # type: (SparseTensorProto, C.CheckerContext) -> None
C.check_sparse_tensor(sparse.SerializeToString(), ctx)
def check_model(model, full_check=False): # type: (Union[ModelProto, Text], bool) -> None
if isinstance(model, string_types):
C.check_model_path(model)
m = onnx.load(model)
else:
C.check_model(model.SerializeToString())
m = model
if full_check:
onnx.shape_inference.infer_shapes(m, True)
ValidationError = C.ValidationError
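# --- Usage sketch (not part of the original module) ---
# check_model accepts either a ModelProto or a path; full_check additionally
# runs strict shape inference. The file name is illustrative only.
def _example_check_model():  # type: () -> None
    # Raises ValidationError (defined above) if the model is malformed.
    check_model('model.onnx', full_check=True)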
|
# A library and utility for drawing ONNX nets. Most of this implementation has
# been borrowed from the caffe2 implementation
# https://github.com/caffe2/caffe2/blob/master/caffe2/python/net_drawer.py
#
# The script takes two required arguments:
#   --input: a path to a serialized ModelProto .pb file.
#   --output: a path at which to write a dot file representation of the graph.
#
# Given this dot file representation, you can, for example, export it to SVG
# with the graphviz `dot` utility, like so:
#
# $ dot -Tsvg my_output.dot -o my_output.svg
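#
# Programmatic use is also possible (hedged sketch; the file paths are
# illustrative):
#
#   from onnx import ModelProto
#   from onnx.tools.net_drawer import GetPydotGraph
#
#   model = ModelProto()
#   with open('model.pb', 'rb') as f:
#       model.ParseFromString(f.read())
#   pydot_graph = GetPydotGraph(model.graph, name=model.graph.name, rankdir='TB')
#   pydot_graph.write_dot('model.dot')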
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from collections import defaultdict
import json
from onnx import ModelProto, GraphProto, NodeProto
import pydot # type: ignore
from typing import Text, Any, Callable, Optional, Dict
OP_STYLE = {
'shape': 'box',
'color': '#0F9D58',
'style': 'filled',
'fontcolor': '#FFFFFF'
}
BLOB_STYLE = {'shape': 'octagon'}
_NodeProducer = Callable[[NodeProto, int], pydot.Node]
def _escape_label(name): # type: (Text) -> Text
# json.dumps is poor man's escaping
return json.dumps(name)
def _form_and_sanitize_docstring(s): # type: (Text) -> Text
url = 'javascript:alert('
url += _escape_label(s).replace('"', '\'').replace('<', '').replace('>', '')
url += ')'
return url
def GetOpNodeProducer(embed_docstring=False, **kwargs): # type: (bool, **Any) -> _NodeProducer
def ReallyGetOpNode(op, op_id): # type: (NodeProto, int) -> pydot.Node
if op.name:
node_name = '%s/%s (op#%d)' % (op.name, op.op_type, op_id)
else:
node_name = '%s (op#%d)' % (op.op_type, op_id)
for i, input in enumerate(op.input):
node_name += '\n input' + str(i) + ' ' + input
for i, output in enumerate(op.output):
node_name += '\n output' + str(i) + ' ' + output
node = pydot.Node(node_name, **kwargs)
if embed_docstring:
url = _form_and_sanitize_docstring(op.doc_string)
node.set_URL(url)
return node
return ReallyGetOpNode
def GetPydotGraph(
graph, # type: GraphProto
name=None, # type: Optional[Text]
rankdir='LR', # type: Text
node_producer=None, # type: Optional[_NodeProducer]
embed_docstring=False, # type: bool
): # type: (...) -> pydot.Dot
if node_producer is None:
node_producer = GetOpNodeProducer(embed_docstring=embed_docstring, **OP_STYLE)
pydot_graph = pydot.Dot(name, rankdir=rankdir)
pydot_nodes = {} # type: Dict[Text, pydot.Node]
pydot_node_counts = defaultdict(int) # type: Dict[Text, int]
for op_id, op in enumerate(graph.node):
op_node = node_producer(op, op_id)
pydot_graph.add_node(op_node)
for input_name in op.input:
if input_name not in pydot_nodes:
input_node = pydot.Node(
_escape_label(
input_name + str(pydot_node_counts[input_name])),
label=_escape_label(input_name),
**BLOB_STYLE
)
pydot_nodes[input_name] = input_node
else:
input_node = pydot_nodes[input_name]
pydot_graph.add_node(input_node)
pydot_graph.add_edge(pydot.Edge(input_node, op_node))
for output_name in op.output:
if output_name in pydot_nodes:
pydot_node_counts[output_name] += 1
output_node = pydot.Node(
_escape_label(
output_name + str(pydot_node_counts[output_name])),
label=_escape_label(output_name),
**BLOB_STYLE
)
pydot_nodes[output_name] = output_node
pydot_graph.add_node(output_node)
pydot_graph.add_edge(pydot.Edge(op_node, output_node))
return pydot_graph
def main(): # type: () -> None
parser = argparse.ArgumentParser(description="ONNX net drawer")
parser.add_argument(
"--input",
type=Text, required=True,
help="The input protobuf file.",
)
parser.add_argument(
"--output",
type=Text, required=True,
help="The output protobuf file.",
)
parser.add_argument(
"--rankdir", type=Text, default='LR',
help="The rank direction of the pydot graph.",
)
parser.add_argument(
"--embed_docstring", action="store_true",
help="Embed docstring as javascript alert. Useful for SVG format.",
)
args = parser.parse_args()
model = ModelProto()
with open(args.input, 'rb') as fid:
content = fid.read()
model.ParseFromString(content)
pydot_graph = GetPydotGraph(
model.graph,
name=model.graph.name,
rankdir=args.rankdir,
node_producer=GetOpNodeProducer(
embed_docstring=args.embed_docstring,
**OP_STYLE
),
)
pydot_graph.write_dot(args.output)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six import string_types
from typing import Any, List, Text, Dict, Set
from onnx import ModelProto, ValueInfoProto
import onnx.checker
def update_inputs_outputs_dims(model, input_dims, output_dims): # type: (ModelProto, Dict[Text, List[Any]], Dict[Text, List[Any]]) -> ModelProto
"""
    This function updates the dimension sizes of the model's inputs and outputs to the values
    provided in input_dims and output_dims. If a provided dim value is negative, a unique dim_param
    will be set for that dimension.
    Example: if we have the following shapes for inputs and outputs:
shape(input_1) = ('b', 3, 'w', 'h')
shape(input_2) = ('b', 4)
and shape(output) = ('b', 'd', 5)
The parameters can be provided as:
input_dims = {
"input_1": ['b', 3, 'w', 'h'],
"input_2": ['b', 4],
}
output_dims = {
"output": ['b', -1, 5]
}
Putting it together:
model = onnx.load('model.onnx')
updated_model = update_inputs_outputs_dims(model, input_dims, output_dims)
onnx.save(updated_model, 'model.onnx')
"""
dim_param_set = set() # type: Set[Text]
def init_dim_param_set(dim_param_set, value_infos): # type: (Set[Text], List[ValueInfoProto]) -> None
for info in value_infos:
shape = info.type.tensor_type.shape
for dim in shape.dim:
if dim.HasField('dim_param'):
dim_param_set.add(dim.dim_param) # type: ignore
init_dim_param_set(dim_param_set, model.graph.input) # type: ignore
init_dim_param_set(dim_param_set, model.graph.output) # type: ignore
init_dim_param_set(dim_param_set, model.graph.value_info) # type: ignore
def update_dim(tensor, dim, j, name): # type: (ValueInfoProto, Any, int, Text) -> None
dim_proto = tensor.type.tensor_type.shape.dim[j]
if isinstance(dim, int):
if dim >= 0:
if dim_proto.HasField('dim_value') and dim_proto.dim_value != dim:
raise ValueError('Unable to set dimension value to {} for axis {} of {}. Contradicts existing dimension value {}.'
.format(dim, j, name, dim_proto.dim_value))
dim_proto.dim_value = dim
else:
generated_dim_param = name + '_' + str(j)
if generated_dim_param in dim_param_set:
raise ValueError('Unable to generate unique dim_param for axis {} of {}. Please manually provide a dim_param value.'
.format(j, name))
dim_proto.dim_param = generated_dim_param
elif isinstance(dim, string_types):
dim_proto.dim_param = dim
else:
raise ValueError('Only int or str is accepted as dimension value, incorrect type: {}'.format(type(dim)))
for input in model.graph.input:
input_name = input.name
input_dim_arr = input_dims[input_name]
for j, dim in enumerate(input_dim_arr):
update_dim(input, dim, j, input_name)
for output in model.graph.output:
output_name = output.name
output_dim_arr = output_dims[output_name]
for j, dim in enumerate(output_dim_arr):
update_dim(output, dim, j, output_name)
onnx.checker.check_model(model)
return model
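# Hedged usage sketch: making axis 0 of the model's input and output symbolic
# while keeping the remaining dims fixed. The path, tensor names, and shapes
# are illustrative; note that input_dims/output_dims must contain an entry for
# every graph input/output, so 'X' and 'Y' are assumed to be the only ones.
#
#     import onnx
#     from onnx.tools.update_model_dims import update_inputs_outputs_dims
#
#     model = onnx.load('model.onnx')
#     model = update_inputs_outputs_dims(model,
#                                        {'X': ['batch', 3, 224, 224]},
#                                        {'Y': ['batch', 1000]})
#     onnx.save(model, 'model_dynamic.onnx')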
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import checker, helper, ModelProto, TensorProto, GraphProto, NodeProto, shape_inference
from typing import Sequence, Text, Any, Tuple, List, Callable
from onnx import numpy_helper
import numpy as np # type: ignore
import onnx.optimizer
import unittest
class TestOptimizer(unittest.TestCase):
def _optimized(self, graph, opts, fixed_point=False, **kwargs): # type: (GraphProto, Sequence[Text], bool, **Any) -> ModelProto
orig_model = helper.make_model(graph, producer_name='onnx-test', **kwargs)
optimized_model = onnx.optimizer.optimize(orig_model, opts, fixed_point)
checker.check_model(optimized_model)
return optimized_model
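    # Hedged sketch of the API these tests exercise (the pass names are real;
    # `model` stands for any ModelProto):
    #
    #     all_passes = onnx.optimizer.get_available_passes()
    #     optimized = onnx.optimizer.optimize(model, ['eliminate_identity'],
    #                                         fixed_point=False)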
    # input_types and output_types are lists of triples of (type, shape, name)
def _make_fake_loop_op(self,
body_nodes, # type: Sequence[NodeProto]
input_types, # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
): # type: (...) -> List[NodeProto]
zero = helper.make_tensor(
"trip_count_value", TensorProto.INT32, (), [10])
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
# lcd is a dummy loop-carried dependency that only exists because
# right now the schema checker is broken and assumes a variadic
# input needs at least one value.
graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT32, ()),
helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
for type, shape, name in input_types:
graph_inputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
graph_outputs = [helper.make_tensor_value_info(
"cond", TensorProto.BOOL, ())]
for type, shape, name in output_types:
graph_outputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
graph_outputs)
loop_inputs = ["trip_count", "condition"]
loop_inputs.extend([name for _, _, name in input_types])
# TODO: fix checker to accept 0-input variadic inputs
if len(loop_inputs) == 2:
loop_inputs.append("")
loop_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["trip_count"], value=zero),
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
]
return retval_nodes
def _make_fake_if_op(self,
true_nodes, # type: Sequence[NodeProto]
false_nodes, # type: Sequence[NodeProto]
output_types # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
): # type: (...) -> List[NodeProto]
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
if_inputs = ["condition"]
if_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
else_branch=false_graph)
]
return retval_nodes
# fn is a function that takes a single node as argument
def _visit_all_nodes_recursive(self, graph, fn): # type: (GraphProto, Callable[[NodeProto], None]) -> None
for node in graph.node:
fn(node)
for attr in node.attribute:
                if attr.HasField('g'):  # attr.g is never None for a message field; check presence instead
                    self._visit_all_nodes_recursive(attr.g, fn)
if len(attr.graphs):
for gr in attr.graphs:
self._visit_all_nodes_recursive(gr, fn)
def test_get_available_passes(self): # type: () -> None
        # FIXME: this does not guarantee that all available passes are listed
graph = helper.make_graph([], "dummy_graph", [], [])
list_of_passes = onnx.optimizer.get_available_passes()
        assert isinstance(list_of_passes, list) and len(list_of_passes) > 0
for pass_name in list_of_passes:
# If pass_name is invalid it throws a RuntimeError
self._optimized(graph, [pass_name])
def test_eliminate_identity_single_use(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["_Y"], ["_Y2"])],
[(TensorProto.FLOAT, (5,), "Y")],
[(TensorProto.FLOAT, (5,), "Y2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
# All identity nodes should have been eliminated
def check_identity(node): # type: (NodeProto) -> None
assert node.op_type != "Identity"
self._visit_all_nodes_recursive(optimized_model.graph, check_identity)
# Use of the output from the Identity node in the main graph should
# have been replaced with the input to the identity node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "X"
# Use of the output from the Identity node in the loop graph should
# have been replaced with the input to that identity node
assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"
def test_eliminate_identity_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
identity = helper.make_node("Identity", ["A"], ["B"])
graph = helper.make_graph(
[add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(optimized_model.graph.node) == 1
def test_eliminate_identity_multiple_uses(self): # type: () -> None
identity = helper.make_node("Identity", ["X"], ["Y"])
add = helper.make_node("Add", ["Z", "Y"], ["A"])
mul = helper.make_node("Mul", ["A", "Y"], ["B"])
graph = helper.make_graph(
[identity, add, mul],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(optimized_model.graph.node) == 2
def test_nop_transpose_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans = helper.make_node("Transpose", ["A"], ["B"], perm=[0, 1])
graph = helper.make_graph(
[add, trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_nop_transpose(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
[(TensorProto.FLOAT, (2, 3), "Y")],
[(TensorProto.FLOAT, (2, 3), "Y2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
# Use of the output from the Transpose node in the main graph should
        # have been replaced with the input to the Transpose node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "X"
# Use of the output from the Transpose node in the loop graph should
        # have been replaced with the input to that Transpose node
assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"
def test_nop_transpose_default(self): # type: () -> None
trans = helper.make_node("Transpose", ["X"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Transpose"
def test_nop_pad_opset10(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X"], ["Y"], pads=[0, 0])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))])
assert len(graph.node) == 1
optimized_model = self._optimized(graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "X"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
pad = helper.make_node("Pad", ["A", "Pads"], ["B"])
graph = helper.make_graph(
[add, pad],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (2,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(2,),
vals=np.array([0, 0]).astype(np.int64).tobytes(),
raw=True)])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.node) == 1
def test_nop_pad(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X", "Pads"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 0, 0, 0]).astype(np.int64).tobytes(),
raw=True)])
assert len(graph.node) == 1
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "X"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_default_opset10(self): # type: () -> None
trans = helper.make_node("Pad", ["X"], ["Y"], pads=[0, 1])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))])
optimized_model = self._optimized(graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_nop_pad_default(self): # type: () -> None
trans = helper.make_node("Pad", ["X", "Pads"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 1, 0, 0]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_eliminate_unused_initializer(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
def test_eliminate_unused_initializer_input(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
assert len(optimized_model.graph.input) == 2
def test_eliminate_unused_initializer_no_eliminate_used_default(self): # type: () -> None
add = helper.make_node("Add", ["X", "A"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
def test_eliminate_unused_initializer_no_eliminate_used(self): # type: () -> None
nodes = [helper.make_node("Add", ["X", "A"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Add", ["_X", "_A"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 2), "X"),
(TensorProto.FLOAT, (1, 2), "A")],
[(TensorProto.FLOAT, (1, 2), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
# Add, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Add"
assert optimized_model.graph.output[0].name == "Z"
# Add
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == 'Add'
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == '_Z2'
assert len(list(optimized_model.graph.initializer)) == 1
def test_eliminate_unused_initializer_no_eliminate_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
assert "Z" in [o.name for o in optimized_model.graph.output]
def test_extract_constant_to_initializer(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(
graph, ["extract_constant_to_initializer"])
self.assertEqual(
set(vi.name for vi in optimized_model.graph.input),
{'X', 'Y', 'A'})
self.assertEqual(len(optimized_model.graph.initializer), 1)
init = optimized_model.graph.initializer[0]
self.assertEqual(init.name, 'A')
self.assertEqual(init.dims, [16, 1, 1])
self.assertEqual(init.data_type, TensorProto.FLOAT)
self.assertEqual(
[n.op_type for n in optimized_model.graph.node], ['Conv', 'Add'])
def test_fuse_concats(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=0),
helper.make_node("Concat", ["X", "G", "Y"], ["Z"], axis=0)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("G", TensorProto.FLOAT, (4, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (18, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"], True) # two passes are needed to simplify the graph to its simplest state.
assert len(optimized_model.graph.node) == 1
assert len(optimized_model.graph.node[0].input) == 7
assert optimized_model.graph.node[0].input == [
"A", "B", "C", "G", "D", "E", "F"]
assert optimized_model.graph.node[0].op_type == "Concat"
def test_fuse_concats_different_axis(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=1),
helper.make_node("Concat", ["X", "Y"], ["Z"], axis=2)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (4, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (18, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"], True) # two passes are needed to simplify the graph to its simplest state.
assert optimized_model.graph == graph
def test_fuse_transpose(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_Y2"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["_Y2"], ["_Y3"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["_Y3"], ["_Y4"], perm=[2, 0, 1])],
[(TensorProto.FLOAT, (2, 3), "X")],
[(TensorProto.FLOAT, (2, 3), "Y4")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("Y4", TensorProto.FLOAT, (4, 3, 2))])
original_model = helper.make_model(graph)
shape_inference.infer_shapes(original_model)
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
shape_inference.infer_shapes(optimized_model)
# Transpose, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
# Transpose
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
def test_fuse_transpose_default_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans1 = helper.make_node("Transpose", ["A"], ["B"])
trans2 = helper.make_node("Transpose", ["B"], ["C"])
graph = helper.make_graph(
[add, trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3))])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["fuse_consecutive_transposes"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_fuse_transpose_default(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 0
def test_fuse_transpose_default_no_fuse(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[0, 1, 2])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (4, 3, 2))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 2
for node in optimized_model.graph.node:
assert node.op_type == "Transpose"
def test_fuse_transpose_into_gemm(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["B"], perm=[1, 0]),
helper.make_node("Gemm", ["A", "B", "C"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_A"], perm=[1, 0]),
helper.make_node("Transpose", ["_Y"], ["_B"], perm=[1, 0]),
helper.make_node("Gemm", ["_A", "_B", "_C"], ["_Z2"])],
[(TensorProto.FLOAT, (2, 3), "X"),
(TensorProto.FLOAT, (5, 2), "Y"),
(TensorProto.FLOAT, (3, 5), "C")],
[(TensorProto.FLOAT, (2, 3), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 2)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (3, 5))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5))])
optimized_model = self._optimized(graph, ["fuse_transpose_into_gemm"])
# Gemm, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Gemm"
# Gemm
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == "Gemm"
def test_fuse_add_bias_into_conv_use_weight_shape(self): # type: () -> None
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Conv", ["_X", "_Y"], ["_Z"]),
helper.make_node("Add", ["_Z", "_A"], ["_B2"])],
[(TensorProto.FLOAT, (1, 5, 3, 3), "X"),
(TensorProto.FLOAT, (16, 5, 3, 3), "Y"),
(TensorProto.FLOAT, (16, 1, 1), "A")],
[(TensorProto.FLOAT, (1, 16, 3, 3), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
# Squeeze, Conv, Constant (trip count), Constant (condition), Loop
assert len(list(optimized_model.graph.node)) == 5
assert optimized_model.graph.node[0].op_type == 'Squeeze'
assert optimized_model.graph.node[1].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
# Squeeze, Conv
assert len(optimized_model.graph.node[4].attribute[0].g.node) == 2
assert optimized_model.graph.node[4].attribute[0].g.node[0].op_type == 'Squeeze'
assert optimized_model.graph.node[4].attribute[0].g.node[1].op_type == 'Conv'
# Output 1 since 0 is 'cond'
assert optimized_model.graph.node[4].attribute[0].g.output[1].name == '_Z'
def test_fuse_add_bias_into_conv_use_weight_shape_with_tile(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 3
assert len(optimized_model.graph.value_info) == 1
assert optimized_model.graph.value_info[0].type.tensor_type.elem_type == TensorProto.INT64
assert len(
optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Tile'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
def test_fuse_add_bias_into_conv_use_conv_shape(self): # type: () -> None
sub = helper.make_node("Sub", ["M", "N"], ["Y"])
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[sub, conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"M", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info(
"N", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))
],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Sub'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
def test_fuse_add_bias_into_conv_use_move_constant(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
def test_fuse_add_bias_into_conv_squeeze_1d_bias_no_fuse(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (3,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_add_bias_into_conv_squeeze_3d_bias_no_fuse(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_add_bias_into_conv_squeeze_4d_bias_no_fuse(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_matmul_add_bias_into_gemm(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16,))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias_same_shape(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (32, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias_bcast_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 16))]
)
optimized_model = self._optimized(graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
def test_fuse_matmul_add_bias_into_gemm_3d_matmul_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (3, 3))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 3))]
)
optimized_model = self._optimized(graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
def test_fuse_matmul_add_bias_into_gemm_3d_bias_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
def test_fuse_matmul_add_bias_into_gemm_multiple_use_no_fuse(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
identity = helper.make_node("Identity", ["Z"], ["A1"])
add = helper.make_node("Add", ["Z", "B"], ["A2"])
graph = helper.make_graph(
[matmul, add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A1", TensorProto.FLOAT, (32, 16)),
helper.make_tensor_value_info("A2", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_no_optional_value_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [0, 0, 1, 1]
def test_fuse_pad_into_conv_no_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [0, 0, 1, 1]
def test_fuse_pad_into_conv_with_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
vals=np.array([0]).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [0, 0, 1, 1]
def test_fuse_pad_into_conv_with_nonzero_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
                         vals=np.array([25]).astype(np.float32).tobytes(),  # non-zero Constant_value, so the Pad cannot be fused
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_1d_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 1, 0, 0, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))]
)
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
def test_fuse_pad_into_conv_1d(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (6,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(6,),
vals=np.array([0, 0, 1, 0, 0, 1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
def test_fuse_pad_into_conv_existing_conv_pad_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1, 1, 1]
def test_fuse_pad_into_conv_existing_conv_pad(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1, 1, 1]
def test_fuse_pad_into_conv_pad_feature_no_fuse_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 1, 0, 0, 0, 0, 0, 0]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_pad_feature_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 1, 0, 0, 0, 0, 0, 0]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_negative_pad_no_fuse_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, -1, -1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_negative_pad_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, -1, -1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_reflection_pad_no_fuse_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="reflect",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_reflection_pad_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="reflect"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_consecutive_squeezes(self): # type: () -> None
nodes = [helper.make_node("Squeeze", ["X"], ["Y"], axes=[0, 4, 5]),
helper.make_node("Squeeze", ["Y"], ["Z"], axes=[0, 3])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Squeeze", ["_X"], ["_Y"], axes=[0, 4, 5]),
helper.make_node("Squeeze", ["_Y"], ["_Z2"], axes=[0, 3])],
[(TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9), "X")],
[(TensorProto.FLOAT, (2, 3, 1, 8, 9), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 1, 8, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
# Squeeze, Constant (trip count), Constant (cond), Loop
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 1, 4, 5, 6]
assert len(list(optimized_model.graph.node)) == 4
def test_fuse_consecutive_squeezes_default(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X"], ["Y"], axes=[0, 4, 5])
squeeze2 = helper.make_node("Squeeze", ["Y"], ["Z"], axes=[0, 3])
squeeze3 = helper.make_node("Squeeze", ["Z"], ["A"], axes=[2])
nodes = [squeeze1, squeeze2, squeeze3]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 8, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 1, 4, 5, 6, 7]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_random(self): # type: () -> None
x_shape = [1, 1, 1, 3, 4, 1, 6, 1, 1, 9]
s1_one_indices = [i for i, a in enumerate(x_shape) if a == 1]
s1_axes = np.random.choice(s1_one_indices, size=np.random.randint(low=1, high=len(s1_one_indices) - 1),
replace=False)
s2_x_shape = [a for i, a in enumerate(x_shape) if i not in s1_axes]
s2_one_indices = [i for i, a in enumerate(s2_x_shape) if a == 1]
s2_axes = s2_one_indices
squeeze1 = helper.make_node("Squeeze", ["X"], ["Y"], axes=s1_axes)
squeeze2 = helper.make_node("Squeeze", ["Y"], ["Z"], axes=s2_axes)
nodes = [squeeze1, squeeze2]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, x_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 4, 6, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 1, 2, 5, 7, 8]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_multi_uses(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X"], ["Y"], axes=[0, 4, 5])
add = helper.make_node("Add", ["Y", "A"], ["Z2"])
squeeze2 = helper.make_node("Squeeze", ["Y"], ["Z"], axes=[0, 3])
graph = helper.make_graph(
[squeeze1, add, squeeze2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 1, 8, 9)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (1, 2, 3, 1, 1, 8, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 4, 5]
assert optimized_model.graph.node[2].op_type == "Squeeze"
assert optimized_model.graph.node[2].input == ["X"]
assert list(optimized_model.graph.node[2].attribute[0].ints) == [
0, 1, 4, 5, 6]
assert len(list(optimized_model.graph.node)) == 3
def test_fuse_consecutive_softmax_log_axis(self): # type: () -> None
for axis in range(3):
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=axis)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
def test_fuse_consecutive_softmax_log_side_effect(self): # type: () -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert graph == optimized_model.graph
def test_fuse_consecutive_softmax_log_multiple_out(self): # type: () -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
graph = helper.make_graph(
[softmax, log, exp],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert len(optimized_model.graph.output) == 2
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.output[1].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Exp"
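# The "nop" pass performs no rewrites; this test guards against the
# optimizer plumbing accidentally dropping user-supplied value_info.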
def test_preserve_value_info(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1])
trans3 = helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])
graph = helper.make_graph(
[trans1, trans2, trans3],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3))])
vi = helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))
graph.value_info.extend([vi])
optimized_model = self._optimized(graph, ["nop"])
assert list(optimized_model.graph.value_info) == [vi]
assert len(list(optimized_model.graph.node)) == 3
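# split_init should keep only the constant-producing half of the model,
# while split_predict keeps the runtime half and exposes the boundary
# tensor ("X") as a graph input.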
def test_split(self): # type: () -> None
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['X'],
value=onnx.helper.make_tensor(
name='X',
data_type=TensorProto.FLOAT,
dims=[1],
vals=[5],
),
)
graph = helper.make_graph(
[node],
'test-optimize-split',
[],
[helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))])
init_model = self._optimized(graph, ['split_init'])
self.assertEqual(len(init_model.graph.node), 1)
self.assertEqual(len(init_model.graph.output), 1)
self.assertEqual(init_model.graph.node[0].op_type, 'Constant')
predict_model = self._optimized(graph, ['split_predict'])
self.assertEqual(len(predict_model.graph.node), 0)
self.assertEqual(len(predict_model.graph.input), 1)
self.assertEqual(predict_model.graph.input[0].name, 'X')
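# lift_lexical_references rewrites subgraph reads of outer-scope names
# ("X" and "Y" inside the loop/if bodies) into an explicit
# __control_inputs attribute on the enclosing node, as asserted below.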
def test_lift_lex_loop(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["lift_lexical_references"])
assert len(optimized_model.graph.node) == 4
# body_graph, __control_inputs
assert len(optimized_model.graph.node[3].attribute) == 2
assert optimized_model.graph.node[3].attribute[1].name == "__control_inputs"
assert optimized_model.graph.node[3].attribute[1].strings[0] == b"X"
assert optimized_model.graph.node[3].attribute[1].strings[1] == b"Y"
def test_lift_lex_if(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_if_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["X"], ["_Y3"])],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "If" node now diverges from ONNX schema. Disable checking.
optimized_model = self._optimized(graph, ["lift_lexical_references"])
# Identity, Constant (condition), If
assert len(optimized_model.graph.node) == 3
# else_branch, then_branch, __control_inputs
assert len(optimized_model.graph.node[2].attribute) == 3
assert optimized_model.graph.node[2].attribute[2].name == "__control_inputs"
assert optimized_model.graph.node[2].attribute[2].strings[0] == b"X"
assert optimized_model.graph.node[2].attribute[2].strings[1] == b"Y"
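# BatchNormalization folds into the preceding Conv's weights and bias:
# with f = scale / sqrt(var + eps), W' = W * f (per output channel) and
# b' = (B - mean) * f + b, which is exactly what the assertions verify.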
def test_fuse_bn_into_conv_simple(self): # type: () -> None
for (tensor_type, np_type) in [(TensorProto.FLOAT, np.float32), (TensorProto.DOUBLE, np.float64)]:
conv = helper.make_node("Conv", ["X", "W", "B"], ["Y"])
bn = helper.make_node("BatchNormalization", [
"Y", "scale", "b", "mean", "var"], ["Z"])
W = np.random.randn(3, 2, 5, 5).astype(np_type) + 2
B = np.random.randn(3,).astype(np_type) + 2
scale = np.random.randn(3,).astype(np_type) + 2
b = np.random.randn(3,).astype(np_type) + 2
mean = np.random.randn(3,).astype(np_type) + 2
var = np.abs(np.random.randn(3,).astype(np_type)) + 2
initializers = [
helper.make_tensor(name, tensor_type,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('W', W), ('B', B), ('scale', scale), ('b', b), ('mean', mean), ('var', var)]
]
graph = helper.make_graph(
[conv, bn],
"test",
[helper.make_tensor_value_info("X", tensor_type, (5, 2, 28, 28)),
helper.make_tensor_value_info("W", tensor_type, (3, 2, 5, 5)),
helper.make_tensor_value_info("B", tensor_type, (3,)),
helper.make_tensor_value_info("scale", tensor_type, (3,)),
helper.make_tensor_value_info("b", tensor_type, (3,)),
helper.make_tensor_value_info("mean", tensor_type, (3,)),
helper.make_tensor_value_info("var", tensor_type, (3,))],
[helper.make_tensor_value_info(
"Z", tensor_type, (5, 3, 24, 24))],
initializer=initializers,
value_info=[
helper.make_tensor_value_info(
"Y", tensor_type, (5, 3, 24, 24))
]
)
optimized_model = self._optimized(graph, ["fuse_bn_into_conv"])
self.assertEqual(len(optimized_model.graph.node), 1)
self.assertEqual(optimized_model.graph.node[0].op_type, 'Conv')
self.assertEqual(len(optimized_model.graph.initializer), 2)
new_W = numpy_helper.to_array(optimized_model.graph.initializer[0])
new_b = numpy_helper.to_array(optimized_model.graph.initializer[1])
f = scale / np.sqrt(var + 1e-5)
np.testing.assert_almost_equal((B - mean) * f + b, new_b)
np.testing.assert_almost_equal(
W * f[:, np.newaxis, np.newaxis, np.newaxis], new_W)
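# Dead-end elimination prunes nodes whose outputs cannot reach any graph
# output; only Z is an output here, so the Exp/Log/Sqrt nodes past Z are
# removed and Softmax -> Log remains.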
def _internal_test_deadend_elimination(self, fixed): # type: (bool) -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
exp1 = helper.make_node("Log", ["Z"], ["Z2"])
exp2 = helper.make_node("Sqrt", ["Z1"], ["Z3"])
graph = helper.make_graph(
[softmax, log, exp, exp1, exp2],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_deadend"], fixed)
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "Softmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Log"
def test_deadend_elimination_simple(self): # type: () -> None
self._internal_test_deadend_elimination(False)
def test_deadend_elimination_simple_fixed(self): # type: () -> None
self._internal_test_deadend_elimination(True)
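# ArgMax is invariant under strictly increasing elementwise maps, so ops
# like Log/Exp/Sqrt feeding only an ArgMax can be dropped; for axis-bound
# ops (Softmax, LogSoftmax) the axes must match, as the later tests check.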
def test_eliminate_nop_monotone_argmax_basic_no_node_axis(self): # type: () -> None
for node_name in ["Log", "Exp", "Sqrt"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
def test_eliminate_nop_monotone_argmax_basic_with_node_axis(self): # type: () -> None
for node_name in ["Softmax", "LogSoftmax"]:
for axis_n in range(3):
for axis_max in range(3):
node = helper.make_node(node_name, ["X"], ["Y"], axis=axis_n)
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis_max)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
if axis_max == axis_n:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis_max
else:
assert optimized_model.graph == graph
def test_eliminate_nop_monotone_argmax_multiple_out(self): # type: () -> None
for node_name in ["Log", "Exp", "Sqrt"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
node2 = helper.make_node(node_name, ["Y"], ["Z1"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert optimized_model.graph == graph
def test_eliminate_nop_monotone_argmax_consecutive(self): # type: () -> None
def _assertion(graph, optimized_model, axis_aligned, true_axis): # type: (GraphProto, ModelProto, bool, int) -> None
if axis_aligned:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == true_axis
else:
assert optimized_model.graph == graph
# no axis X no axis test
for node_name_0 in ["Log", "Exp", "Sqrt"]:
for node_name_1 in ["Log", "Exp", "Sqrt"]:
for axis in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(node_name_1, ["Y"], ["Y1"])
argmax = helper.make_node("ArgMax", ["Y1"], ["Z"], axis=axis)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model, True, axis)
# no axis X axis test
for node_name_0 in ["Log", "Exp", "Sqrt"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(node_name_1, ["Y"], ["Y1"], axis=axis_0)
argmax = helper.make_node("ArgMax", ["Y1"], ["Z"], axis=axis_1)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model, axis_0 == axis_1, axis_1)
# axis X axis test
for node_name_0 in ["Softmax", "LogSoftmax"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
for axis_2 in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"], axis=axis_0)
node2 = helper.make_node(node_name_1, ["Y"], ["Y1"], axis=axis_1)
argmax = helper.make_node("ArgMax", ["Y1"], ["Z"], axis=axis_2)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
if axis_0 == axis_1: # we can reduce both of the monotonic ops
_assertion(graph, optimized_model, axis_1 == axis_2, axis_2)
elif axis_1 == axis_2: # we can reduce one of the monotonic ops
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[-1].op_type == "ArgMax"
assert optimized_model.graph.node[-1].attribute[0].name == "axis"
assert optimized_model.graph.node[-1].attribute[0].i == axis_2
else: # we can't reduce anything
assert optimized_model.graph == graph
def test_eliminate_nop_dropout(self): # type: () -> None
node = helper.make_node("Dropout", ["X"], ["Y"])
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False)
# we don't want to eliminate the dropout in opset 12,
# even when it's an optional parameter (defaults to 0)
assert optimized_model.graph == graph
def test_eliminate_nop_dropout_opset11_graph_output(self): # type: () -> None
node = helper.make_node("Log", ["X"], ["Y"])
node1 = helper.make_node("Dropout", ["Y"], ["Z"], ratio=0.0)
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
def test_eliminate_nop_dropout_opset11(self): # type: () -> None
for ratio in [0.0, 0.5]:
node = helper.make_node("Dropout", ["X"], ["Y"], ratio=ratio)
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
if ratio > 0.0:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
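# Reduce*(keepdims=0) followed by Unsqueeze on the same axes is equivalent
# to Reduce*(keepdims=1), so the fusion only fires when keepdims is off
# and the two axis lists match.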
def test_fuse_reduction_unsqueeze(self): # type: () -> None
def _calculate_post_transform_shape(input_shape, reduction_axes, unsqueeze_axes, keepdim): # type: (Tuple[int, ...], List[int], List[int], bool) -> Tuple[int, ...]
post_reduce_shape = None
if keepdim:
post_reduce_shape = tuple([(x if i not in reduction_axes else 1) for i, x in enumerate(input_shape)])
else:
post_reduce_shape = tuple([x for i, x in enumerate(input_shape) if i not in reduction_axes])
post_unsqueeze_shape = list(post_reduce_shape)
for ax in unsqueeze_axes:
post_unsqueeze_shape.insert(ax, 1)
return tuple(post_unsqueeze_shape)
for reduction in ["ReduceL1", "ReduceL2", "ReduceLogSum",
"ReduceLogSumExp", "ReduceMax", "ReduceMean",
"ReduceMin", "ReduceProd", "ReduceSum", "ReduceSumSquare"]:
for axes1 in [[1], [1, 2], [2]]:
for axes2 in [[1], [1, 2], [2]]:
for keepdim in [False, True]:
input_shape = (5, 7, 9)
output_shape = _calculate_post_transform_shape(input_shape, axes1, axes2, keepdim) # type: Tuple[int, ...]
node = helper.make_node(reduction, ["X"], ["Y"], axes=axes1, keepdims=keepdim)
node1 = helper.make_node("Unsqueeze", ["Y"], ["Z"], axes=axes2)
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)])
optimized_model = self._optimized(
graph, ["fuse_consecutive_reduce_unsqueeze"], False)
if keepdim or axes1 != axes2:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[-1].op_type == reduction
assert optimized_model.graph.node[-1].attribute[0].name == "axes"
assert optimized_model.graph.node[-1].attribute[0].ints == axes1
optimized_output_shape = tuple(x.dim_value for x in optimized_model.graph.output[0].type.tensor_type.shape.dim)
assert optimized_output_shape == output_shape
if __name__ == '__main__':
unittest.main()
|
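# Tests for onnx.checker: node/graph/model validation, tensor and sparse
# tensor well-formedness, and IR-version-specific rules.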
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from typing import Sequence
import numpy as np # type: ignore
from onnx import checker, helper
from onnx import TensorProto, GraphProto, SparseTensorProto
import onnx.onnx_cpp2py_export.checker as C
import onnx.defs
class TestChecker(unittest.TestCase):
@property
def _sample_float_tensor(self): # type: () -> TensorProto
np_array = np.random.randn(2, 3).astype(np.float32)
return helper.make_tensor(
name='test',
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=np_array.reshape(6).tolist()
)
def test_check_node(self): # type: () -> None
node = helper.make_node(
"Relu", ["X"], ["Y"], name="test")
checker.check_node(node)
def test_check_node_input_marked_optional(self): # type: () -> None
# GivenTensorFill's input is marked optional, hence it is used in this test.
node = helper.make_node(
"GivenTensorFill", [], ["Y"], name="test")
checker.check_node(node)
# Explicitly passing the empty string for the optional input is also valid
node = helper.make_node(
"GivenTensorFill", [""], ["Y"], name="test")
checker.check_node(node)
# Relu's input is not optional, so an empty input name must be rejected
node = helper.make_node(
"Relu", [""], ["Y"], name="test")
self.assertRaises(checker.ValidationError, checker.check_node, node)
def test_check_graph_ir_version_3(self): # type: () -> None
ctx = C.CheckerContext()
ctx.ir_version = 3
ctx.opset_imports = {'': onnx.defs.onnx_opset_version()}
def check_ir_version_3(g): # type: (GraphProto) -> None
checker.check_graph(g, ctx)
node = helper.make_node(
"Relu", ["X"], ["Y"], name="test")
graph = helper.make_graph(
[node],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
check_ir_version_3(graph)
graph.initializer.extend([self._sample_float_tensor])
graph.initializer[0].name = 'no-exist'
self.assertRaises(checker.ValidationError, check_ir_version_3, graph)
graph.initializer[0].name = 'X'
check_ir_version_3(graph)
def test_check_graph(self): # type: () -> None
node = helper.make_node(
"Relu", ["X"], ["Y"], name="test")
graph = helper.make_graph(
[node],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
checker.check_graph(graph)
graph.initializer.extend([self._sample_float_tensor])
graph.initializer[0].name = 'no-exist'
checker.check_graph(graph)
graph.initializer[0].name = 'X'
checker.check_graph(graph)
def test_check_graph_optional_input(self): # type: () -> None
# GivenTensorFill's input is marked optional, hence it is used in this test.
node = helper.make_node(
"GivenTensorFill", [""], ["Y"], name="test")
graph = helper.make_graph(
[node],
"test",
[],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
checker.check_graph(graph)
def test_check_graph_ssa(self): # type: () -> None
relu1 = helper.make_node(
"Relu", ["X"], ["Z"], name="relu1")
relu2 = helper.make_node(
"Relu", ["Y"], ["Z"], name="relu2")
graph = helper.make_graph(
[relu1, relu2],
"test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2]),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])
],
outputs=[
helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])
]
)
self.assertRaises(checker.ValidationError, checker.check_graph, graph)
def test_check_graph_topologically_sorted(self): # type: () -> None
n1 = helper.make_node(
"Scale", ["X"], ["Y"], scale=2., name="n1")
n2 = helper.make_node(
"Scale", ["Y"], ["Z"], scale=3., name="n2")
graph = helper.make_graph(
[n2, n1],
"test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])
],
outputs=[
helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])
]
)
self.assertRaises(checker.ValidationError, checker.check_graph, graph)
def test_check_model(self): # type: () -> None
node = helper.make_node(
"Relu", ["X"], ["Y"], name="test")
graph = helper.make_graph(
[node],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
model = helper.make_model(graph, producer_name='test')
checker.check_model(model)
def test_check_old_model(self): # type: () -> None
node = helper.make_node(
"Pad", ["X"], ["Y"], paddings=(0, 0, 0, 0))
graph = helper.make_graph(
[node],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
onnx_id = helper.make_opsetid("", 1)
model = helper.make_model(graph, producer_name='test', opset_imports=[onnx_id])
checker.check_model(model)
def test_check_tensor(self): # type: () -> None
tensor = self._sample_float_tensor
checker.check_tensor(tensor)
tensor.raw_data = np.random.randn(2, 3).astype(np.float32).tobytes()
self.assertRaises(checker.ValidationError, checker.check_tensor, tensor)
def test_check_string_tensor(self): # type: () -> None
tensor = TensorProto()
tensor.data_type = TensorProto.STRING
tensor.dims.append(1)
tensor.string_data.append('Test'.encode('utf-8'))
checker.check_tensor(tensor)
del tensor.string_data[:]
tensor.raw_data = 'Test'.encode('utf-8')
# string data should not be stored in raw_data field
self.assertRaises(checker.ValidationError, checker.check_tensor, tensor)
def test_check_tensor_mismatched_field(self): # type: () -> None
tensor = self._sample_float_tensor
tensor.data_type = TensorProto.INT32
self.assertRaises(checker.ValidationError, checker.check_tensor, tensor)
def test_nested_graph(self): # type: () -> None
n1 = helper.make_node(
"Scale", ["X"], ["Y"], scale=2., name="n1")
n2 = helper.make_node(
"Scale", ["Y"], ["Z"], scale=3., name="n2")
graph = helper.make_graph(
[n1, n2],
"nested",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])
],
outputs=[
helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])
]
)
i1 = helper.make_node(
"If", ["cond"], ["Z"], then_branch=graph, else_branch=graph)
graph = helper.make_graph(
[i1],
"test",
inputs=[
helper.make_tensor_value_info("cond", TensorProto.BOOL, [1]),
helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])
],
outputs=[helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])],
)
checker.check_graph(graph)
def test_nested_graph_without_subgraph_input_shape(self): # type: () -> None
n1 = helper.make_node(
"Scale", ["X"], ["Y"], scale=2., name="n1")
n2 = helper.make_node(
"Scale", ["Y"], ["Z"], scale=3., name="n2")
input_x = onnx.ValueInfoProto()
input_x.name = "X"
graph = helper.make_graph(
[n1, n2],
"nested",
inputs=[
input_x
],
outputs=[
helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])
]
)
i1 = helper.make_node(
"If", ["cond"], ["Z"], then_branch=graph, else_branch=graph)
graph = helper.make_graph(
[i1],
"test",
inputs=[
helper.make_tensor_value_info("cond", TensorProto.BOOL, [1]),
helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])
],
outputs=[helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])],
)
checker.check_graph(graph)
@property
def _sample_0_elem_tensor(self): # type: () -> TensorProto
np_array = np.random.randn(0, 3).astype(np.float32)
return helper.make_tensor(
name='test',
data_type=TensorProto.FLOAT,
dims=(0, 3),
vals=np_array.reshape(0).tolist()
)
def test_check_tensor_zero_elem(self): # type: () -> None
tensor = self._sample_0_elem_tensor
checker.check_tensor(tensor)
def test_check_removed_experimental_op(self): # type: () -> None
node = helper.make_node(
"ConstantFill", [], ["Y"], name="test", shape=[1, 2])
checker.check_node(node)
def test_skip_schema_check_on_non_standard_domain(self): # type: () -> None
node = helper.make_node(
"NonExistOp", ["X"], ["Y"], name="test", domain="test.domain")
graph = helper.make_graph(
[node],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
onnx_id = helper.make_opsetid("test.domain", 1)
model = helper.make_model(graph, producer_name='test',
opset_imports=[onnx_id])
checker.check_model(model)
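# Builds a SparseTensorProto in COO form: `values` holds the nnz nonzeros
# and `indices` is either a flat [nnz] tensor of linearized positions or
# an [nnz, rank] tensor of per-dimension coordinates.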
def make_sparse(self,
shape, # type: Sequence[int]
values, # type: Sequence[int]
indices_shape, # type: Sequence[int]
indices # type: Sequence[int]
): # type: (...) -> SparseTensorProto
sparse = SparseTensorProto()
sparse.dims.extend(shape)
nnz = len(values)
sparse.values.CopyFrom(helper.make_tensor('spval', TensorProto.INT64, (nnz,), values))
sparse.indices.CopyFrom(helper.make_tensor('spind', TensorProto.INT64, indices_shape, indices))
return sparse
def test_check_sparse_tensor(self): # type: () -> None
sparse = self.make_sparse([100], [13, 17, 19], [3], [9, 27, 81])
checker.check_sparse_tensor(sparse)
def test_check_sparse_tensor_invalid_index(self): # type: () -> None
# index value 181 is out-of-range
sparse = self.make_sparse([100], [13, 17, 19], [3], [9, 27, 181])
self.assertRaises(checker.ValidationError, checker.check_sparse_tensor, sparse)
def test_check_sparse_tensor_unordered(self): # type: () -> None
# index values are not in sorted order
sparse = self.make_sparse([100], [13, 17, 19], [3], [27, 9, 81])
self.assertRaises(checker.ValidationError, checker.check_sparse_tensor, sparse)
def test_check_sparse_tensor_coo_format(self): # type: () -> None
sparse = self.make_sparse([10, 10], [13, 17, 19], [3, 2], [0, 9, 2, 7, 8, 1])
checker.check_sparse_tensor(sparse)
def test_check_sparse_tensor_coo_format_invalid_index(self): # type: () -> None
sparse = self.make_sparse([10, 10], [13, 17, 19], [3, 2], [0, 9, 0, 27, 8, 1])
self.assertRaises(checker.ValidationError, checker.check_sparse_tensor, sparse)
def test_check_sparse_tensor_coo_format_invalid_shape(self): # type: () -> None
sparse = self.make_sparse([10, 10], [13, 17, 19], [2, 3], [0, 9, 2, 7, 8, 1])
self.assertRaises(checker.ValidationError, checker.check_sparse_tensor, sparse)
def test_check_sparse_tensor_coo_format_invalid_dim2(self): # type: () -> None
sparse = self.make_sparse([10, 10], [13, 17, 19], [3, 1], [0, 1, 2])
self.assertRaises(checker.ValidationError, checker.check_sparse_tensor, sparse)
def test_check_sparse_matmul(self): # type: () -> None
M = 5
N = 10
# Create ValueInfoProto for input X of shape [N]
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [N])
# Create a [M,N] sparse-matrix constant C
sparse_tensor = self.make_sparse([M, N], [2, 3, 1], [3], [3, 11, 37])
node1 = helper.make_node('Constant', [], ['C'], sparse_value=sparse_tensor)
# Create ValueInfoProto for output Y of shape [M]
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [M])
# Compute Y = C X
node2 = helper.make_node('MatMul', ['C', 'X'], ['Y'])
# create graph
graph = helper.make_graph([node1, node2], "sparse_matmul", [X], [Y])
# check graph
checker.check_graph(graph)
def test_check_model_unsupported_input_type(self): # type: () -> None
N = 10
X = helper.make_tensor_value_info('X', TensorProto.BOOL, [N])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [N])
Z = helper.make_tensor_value_info('Z', TensorProto.FLOAT, [N])
onnx_id = helper.make_opsetid("", 6)
node = helper.make_node('Add', ['X', 'Y'], ['Z'])
graph = helper.make_graph([node], "test_add_input", [X, Y], [Z])
model = helper.make_model(graph, producer_name='test', opset_imports=[onnx_id])
self.assertRaises(checker.ValidationError, checker.check_model, model, True)
def test_check_model_inconsistent_type(self): # type: () -> None
N = 10
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [N])
Y = helper.make_tensor_value_info('Y', TensorProto.INT32, [N])
Z = helper.make_tensor_value_info('Z', TensorProto.FLOAT, [N])
onnx_id = helper.make_opsetid("", 6)
node = helper.make_node('Add', ['X', 'Y'], ['Z'])
graph = helper.make_graph([node], "test_add_input", [X, Y], [Z])
model = helper.make_model(graph, producer_name='test', opset_imports=[onnx_id])
self.assertRaises(checker.ValidationError, checker.check_model, model, True)
def test_check_model_unsupported_output_type(self): # type: () -> None
N = 10
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [N])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [N])
Z = helper.make_tensor_value_info('Z', TensorProto.BOOL, [N])
onnx_id = helper.make_opsetid("", 6)
node = helper.make_node('Add', ['X', 'Y'], ['Z'])
graph = helper.make_graph([node], "test_add_input", [X, Y], [Z])
model = helper.make_model(graph, producer_name='test', opset_imports=[onnx_id])
self.assertRaises(RuntimeError, checker.check_model, model, True)
if __name__ == '__main__':
unittest.main()
|
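# Tests for onnx.helper: attribute, node, graph, model, and tensor
# construction utilities.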
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
import numpy as np # type: ignore
from onnx import helper, defs, numpy_helper, checker
from onnx import AttributeProto, TensorProto, GraphProto
from typing import Text, Any, List
import unittest
class TestHelperAttributeFunctions(unittest.TestCase):
def test_attr_float(self): # type: () -> None
# float
attr = helper.make_attribute("float", 1.)
self.assertEqual(attr.name, "float")
self.assertEqual(attr.f, 1.)
checker.check_attribute(attr)
# float with scientific
attr = helper.make_attribute("float", 1e10)
self.assertEqual(attr.name, "float")
self.assertEqual(attr.f, 1e10)
checker.check_attribute(attr)
def test_attr_int(self): # type: () -> None
# integer
attr = helper.make_attribute("int", 3)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 3)
checker.check_attribute(attr)
# long integer
attr = helper.make_attribute("int", 5)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 5)
checker.check_attribute(attr)
# octinteger
attr = helper.make_attribute("int", 0o1701)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 0o1701)
checker.check_attribute(attr)
# hexinteger
attr = helper.make_attribute("int", 0x1701)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 0x1701)
checker.check_attribute(attr)
def test_attr_doc_string(self): # type: () -> None
attr = helper.make_attribute("a", "value")
self.assertEqual(attr.name, "a")
self.assertEqual(attr.doc_string, "")
attr = helper.make_attribute("a", "value", "doc")
self.assertEqual(attr.name, "a")
self.assertEqual(attr.doc_string, "doc")
def test_attr_string(self): # type: () -> None
# bytes
attr = helper.make_attribute("str", b"test")
self.assertEqual(attr.name, "str")
self.assertEqual(attr.s, b"test")
checker.check_attribute(attr)
# unspecified
attr = helper.make_attribute("str", "test")
self.assertEqual(attr.name, "str")
self.assertEqual(attr.s, b"test")
checker.check_attribute(attr)
# unicode
attr = helper.make_attribute("str", u"test")
self.assertEqual(attr.name, "str")
self.assertEqual(attr.s, b"test")
checker.check_attribute(attr)
# empty str
attr = helper.make_attribute("str", "")
self.assertEqual(attr.name, "str")
self.assertEqual(helper.get_attribute_value(attr), b"")
checker.check_attribute(attr)
def test_attr_repeated_float(self): # type: () -> None
attr = helper.make_attribute("floats", [1.0, 2.0])
self.assertEqual(attr.name, "floats")
self.assertEqual(list(attr.floats), [1.0, 2.0])
checker.check_attribute(attr)
def test_attr_repeated_int(self): # type: () -> None
attr = helper.make_attribute("ints", [1, 2])
self.assertEqual(attr.name, "ints")
self.assertEqual(list(attr.ints), [1, 2])
checker.check_attribute(attr)
def test_attr_repeated_str(self): # type: () -> None
attr = helper.make_attribute("strings", ["str1", "str2"])
self.assertEqual(attr.name, "strings")
self.assertEqual(list(attr.strings), [b"str1", b"str2"])
checker.check_attribute(attr)
def test_attr_repeated_tensor_proto(self): # type: () -> None
tensors = [
helper.make_tensor(
name='a',
data_type=TensorProto.FLOAT,
dims=(1,),
vals=np.ones(1).tolist()
),
helper.make_tensor(
name='b',
data_type=TensorProto.FLOAT,
dims=(1,),
vals=np.ones(1).tolist()
)]
attr = helper.make_attribute("tensors", tensors)
self.assertEqual(attr.name, "tensors")
self.assertEqual(list(attr.tensors), tensors)
checker.check_attribute(attr)
def test_attr_repeated_graph_proto(self): # type: () -> None
graphs = [GraphProto(), GraphProto()]
graphs[0].name = "a"
graphs[1].name = "b"
attr = helper.make_attribute("graphs", graphs)
self.assertEqual(attr.name, "graphs")
self.assertEqual(list(attr.graphs), graphs)
checker.check_attribute(attr)
def test_is_attr_legal(self): # type: () -> None
# no name, no field
attr = AttributeProto()
self.assertRaises(checker.ValidationError, checker.check_attribute, attr)
# name, but no field
attr = AttributeProto()
attr.name = "test"
self.assertRaises(checker.ValidationError, checker.check_attribute, attr)
# name, with two fields
attr = AttributeProto()
attr.name = "test"
attr.f = 1.0
attr.i = 2
self.assertRaises(checker.ValidationError, checker.check_attribute, attr)
def test_is_attr_legal_verbose(self): # type: () -> None
def _set(attr, type, var, value): # type: (AttributeProto, AttributeProto.AttributeType, Text, Any) -> None
setattr(attr, var, value)
setattr(attr, 'type', type)
def _extend(attr, type, var, value): # type: (AttributeProto, AttributeProto.AttributeType, List[Any], Any) -> None
var.extend(value)
setattr(attr, 'type', type)
SET_ATTR = [
(lambda attr: _set(attr, AttributeProto.FLOAT, "f", 1.0)),
(lambda attr: _set(attr, AttributeProto.INT, "i", 1)),
(lambda attr: _set(attr, AttributeProto.STRING, "s", b"str")),
(lambda attr: _extend(attr, AttributeProto.FLOATS, attr.floats, [1.0, 2.0])),
(lambda attr: _extend(attr, AttributeProto.INTS, attr.ints, [1, 2])),
(lambda attr: _extend(attr, AttributeProto.STRINGS, attr.strings, [b"a", b"b"])),
]
# Randomly set one field, and the result should be legal.
for _i in range(100):
attr = AttributeProto()
attr.name = "test"
random.choice(SET_ATTR)(attr)
checker.check_attribute(attr)
# Randomly set two fields, and then ensure helper function catches it.
for _i in range(100):
attr = AttributeProto()
attr.name = "test"
for func in random.sample(SET_ATTR, 2):
func(attr)
self.assertRaises(checker.ValidationError,
checker.check_attribute,
attr)
class TestHelperNodeFunctions(unittest.TestCase):
def test_node_no_arg(self): # type: () -> None
self.assertTrue(defs.has("Relu"))
node_def = helper.make_node(
"Relu", ["X"], ["Y"], name="test")
self.assertEqual(node_def.op_type, "Relu")
self.assertEqual(node_def.name, "test")
self.assertEqual(list(node_def.input), ["X"])
self.assertEqual(list(node_def.output), ["Y"])
def test_attr_doc_string(self): # type: () -> None
node_def = helper.make_node(
"Relu", ["X"], ["Y"], name="test", doc_string="doc")
self.assertEqual(node_def.doc_string, "doc")
def test_node_with_arg(self): # type: () -> None
self.assertTrue(defs.has("Relu"))
# Note: Relu actually does not need an arg, but let's
# test it.
node_def = helper.make_node(
"Relu", ["X"], ["Y"],
arg_value=1)
self.assertEqual(node_def.op_type, "Relu")
self.assertEqual(list(node_def.input), ["X"])
self.assertEqual(list(node_def.output), ["Y"])
self.assertEqual(len(node_def.attribute), 1)
self.assertEqual(
node_def.attribute[0],
helper.make_attribute("arg_value", 1))
def test_node_domain(self): # type: () -> None
node_def = helper.make_node(
"Relu", ["X"], ["Y"], name="test", doc_string="doc", domain="test.domain")
self.assertEqual(node_def.domain, "test.domain")
def test_graph(self): # type: () -> None
node_def1 = helper.make_node(
"Relu", ["X"], ["Y"])
node_def2 = helper.make_node(
"Add", ["X", "Y"], ["Z"])
value_info = [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])]
graph = helper.make_graph(
[node_def1, node_def2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])],
doc_string=None,
value_info=value_info,
)
self.assertEqual(graph.name, "test")
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0], node_def1)
self.assertEqual(graph.node[1], node_def2)
self.assertEqual(graph.doc_string, "")
self.assertEqual(graph.value_info[0], value_info[0])
def test_graph_docstring(self): # type: () -> None
graph = helper.make_graph([], "my graph", [], [], None, "my docs")
self.assertEqual(graph.name, "my graph")
self.assertEqual(graph.doc_string, "my docs")
def test_model(self): # type: () -> None
node_def = helper.make_node(
"Relu", ["X"], ["Y"])
graph_def = helper.make_graph(
[node_def],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
self.assertRaises(AttributeError, helper.make_model, graph_def, xxx=1)
model_def = helper.make_model(graph_def, producer_name='test')
self.assertEqual(model_def.producer_name, 'test')
def test_model_docstring(self): # type: () -> None
graph = helper.make_graph([], "my graph", [], [])
model_def = helper.make_model(graph, doc_string='test')
# models may carry their own documentation, but they have no name field;
# a model is identified by the domain-qualified name of its underlying graph.
self.assertFalse(hasattr(model_def, "name"))
self.assertEqual(model_def.doc_string, 'test')
def test_model_metadata_props(self): # type: () -> None
graph = helper.make_graph([], "my graph", [], [])
model_def = helper.make_model(graph, doc_string='test')
helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
checker.check_model(model_def)
helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
checker.check_model(model_def) # helper replaces, so no dupe
dupe = model_def.metadata_props.add()
dupe.key = 'Title'
dupe.value = 'Other'
self.assertRaises(checker.ValidationError, checker.check_model, model_def)
class TestHelperTensorFunctions(unittest.TestCase):
def test_make_tensor(self): # type: () -> None
np_array = np.random.randn(2, 3).astype(np.float32)
tensor = helper.make_tensor(
name='test',
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=np_array.reshape(6).tolist()
)
self.assertEqual(tensor.name, 'test')
np.testing.assert_equal(np_array, numpy_helper.to_array(tensor))
# use raw_data field to store the data
tensor = helper.make_tensor(
name='test',
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=np_array.reshape(6).tobytes(),
raw=True,
)
np.testing.assert_equal(np_array, numpy_helper.to_array(tensor))
string_list = list(s.encode('utf-8') for s in ['Amy', 'Billy', 'Cindy', 'David'])
tensor = helper.make_tensor(
name='test',
data_type=TensorProto.STRING,
dims=(2, 2),
vals=string_list,
raw=False
)
self.assertEqual(string_list, list(tensor.string_data))
def test_make_sparse_tensor(self): # type: () -> None
values = [1.1, 2.2, 3.3, 4.4, 5.5]
values_tensor = helper.make_tensor(
name='test',
data_type=TensorProto.FLOAT,
dims=(5, ),
vals=values
)
indices = [1, 3, 5, 7, 9]
indices_tensor = helper.make_tensor(
name='test_indices',
data_type=TensorProto.INT64,
dims=(5, ),
vals=indices
)
dense_shape = [10]
sparse = helper.make_sparse_tensor(values_tensor, indices_tensor, dense_shape)
self.assertEqual(sparse.values, values_tensor)
self.assertEqual(sparse.indices, indices_tensor)
self.assertEqual(sparse.dims, dense_shape)
def test_make_tensor_value_info(self): # type: () -> None
vi = helper.make_tensor_value_info('X', TensorProto.FLOAT, (2, 4))
checker.check_value_info(vi)
# scalar value
vi = helper.make_tensor_value_info('Y', TensorProto.FLOAT, ())
checker.check_value_info(vi)
class TestPrintableGraph(unittest.TestCase):
def test_initializer_with_matching_graph_input(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y_Initializer"], ["Z"])
value_info = [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1])]
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1]),
helper.make_tensor_value_info("Y_Initializer", TensorProto.FLOAT, [1])], # inputs
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1])], # outputs
[helper.make_tensor("Y_Initializer", TensorProto.FLOAT, [1], [1])], # initializers
doc_string=None,
value_info=value_info
)
graph_str = helper.printable_graph(graph)
self.assertTrue(''') optional inputs with matching initializers (
%Y_Initializer[FLOAT, 1]''' in graph_str, graph_str)
def test_initializer_no_matching_graph_input(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y_Initializer"], ["Z"])
value_info = [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1])]
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1])], # inputs
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1])], # outputs
[helper.make_tensor("Y_Initializer", TensorProto.FLOAT, [1], [1])], # initializers
doc_string=None,
value_info=value_info
)
graph_str = helper.printable_graph(graph)
self.assertTrue(''') initializers (
%Y_Initializer[FLOAT, 1]''' in graph_str, graph_str)
if __name__ == '__main__':
unittest.main()
|
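# onnx.utils.polish_model is expected to validate the model, run shape
# inference and the optimizer, and strip node doc_strings (checked below).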
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import onnx.utils
from onnx import helper, TensorProto
class TestUtilityFunctions(unittest.TestCase):
def test_polish_model(self): # type: () -> None
node_def = helper.make_node(
"Relu", ["X"], ["Y"], doc_string="ABC")
graph_def = helper.make_graph(
[node_def],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
model_def = helper.make_model(graph_def, producer_name='test')
polished_def = onnx.utils.polish_model(model_def)
self.assertEqual(polished_def.producer_name, 'test')
self.assertEqual(len(polished_def.graph.node), 1)
self.assertFalse(polished_def.graph.node[0].HasField('doc_string'))
if __name__ == '__main__':
unittest.main()
|
import unittest
from onnx import defs, AttributeProto
class TestSchema(unittest.TestCase):
def test_get_schema(self): # type: () -> None
defs.get_schema("Relu")
def test_typecheck(self): # type: () -> None
defs.get_schema("Conv")
def test_attr_default_value(self): # type: () -> None
v = defs.get_schema(
"BatchNormalization").attributes['epsilon'].default_value
self.assertEqual(type(v), AttributeProto)
self.assertEqual(v.type, AttributeProto.FLOAT)
if __name__ == '__main__':
unittest.main()
|
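# Tests for the external-data workflow: tensor payloads can live outside
# the protobuf, referenced by file location (and optionally offset/length),
# and models can be converted to and from that representation.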
import tempfile
import unittest
import uuid
import numpy as np # type: ignore
import shutil
import os
import os.path as Path
import onnx
from onnx import checker, helper
from onnx import ModelProto, TensorProto
from onnx.external_data_helper import set_external_data
from onnx.external_data_helper import convert_model_to_external_data
from onnx.external_data_helper import convert_model_from_external_data
from onnx.external_data_helper import load_external_data_for_model
from onnx.numpy_helper import to_array, from_array
from typing import Any, Tuple, Text, List
class TestLoadExternalData(unittest.TestCase):
def setUp(self): # type: () -> None
self.temp_dir = tempfile.mkdtemp() # type: Text
self.initializer_value = np.arange(6).reshape(3, 2).astype(np.float32) + 512
self.attribute_value = np.arange(6).reshape(2, 3).astype(np.float32) + 256
self.model_filename = self.create_test_model()
def tearDown(self): # type: () -> None
shutil.rmtree(self.temp_dir)
def get_temp_model_filename(self): # type: () -> Text
return os.path.join(self.temp_dir, str(uuid.uuid4()) + '.onnx')
def create_external_data_tensor(self, value, tensor_name): # type: (List[Any], Text) -> TensorProto
tensor = from_array(np.array(value))
tensor.name = tensor_name
tensor_filename = "{}.bin".format(tensor_name)
set_external_data(tensor, location=tensor_filename)
with open(os.path.join(self.temp_dir, tensor_filename), 'wb') as data_file:
data_file.write(tensor.raw_data)
tensor.ClearField('raw_data')
tensor.data_location = onnx.TensorProto.EXTERNAL
return tensor
def create_test_model(self): # type: () -> Text
constant_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['values'],
value=self.create_external_data_tensor(self.attribute_value, "attribute_value")
)
initializers = [self.create_external_data_tensor(self.initializer_value, "input_value")]
inputs = [helper.make_tensor_value_info("input_value",
onnx.TensorProto.FLOAT,
self.initializer_value.shape)]
graph = helper.make_graph([constant_node], "test_graph",
inputs=inputs, outputs=[],
initializer=initializers)
model = helper.make_model(graph)
model_filename = os.path.join(self.temp_dir, "model.onnx")
with open(model_filename, "wb") as model_file:
model_file.write(model.SerializeToString())
return model_filename
def test_check_model(self): # type: () -> None
checker.check_model(self.model_filename)
def test_load_external_data(self): # type: () -> None
model = onnx.load_model(self.model_filename)
initializer_tensor = model.graph.initializer[0]
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = model.graph.node[0].attribute[0].t
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
def test_load_external_data_for_model(self): # type: () -> None
model = onnx.load_model(self.model_filename, load_external_data=False)
load_external_data_for_model(model, self.temp_dir)
initializer_tensor = model.graph.initializer[0]
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = model.graph.node[0].attribute[0].t
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
def test_save_external_data(self): # type: () -> None
model = onnx.load_model(self.model_filename)
temp_dir = os.path.join(self.temp_dir, "save_copy")
os.mkdir(temp_dir)
new_model_filename = os.path.join(temp_dir, 'model.onnx')
onnx.save_model(model, new_model_filename)
new_model = onnx.load_model(new_model_filename)
initializer_tensor = new_model.graph.initializer[0]
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = new_model.graph.node[0].attribute[0].t
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
class TestLoadExternalDataSingleFile(unittest.TestCase):
def setUp(self): # type: () -> None
self.temp_dir = tempfile.mkdtemp() # type: Text
self.initializer_value = np.arange(6).reshape(3, 2).astype(np.float32) + 512
self.attribute_value = np.arange(6).reshape(2, 3).astype(np.float32) + 256
self.model_filename = self.create_test_model()
def tearDown(self): # type: () -> None
shutil.rmtree(self.temp_dir)
def get_temp_model_filename(self): # type: () -> Text
return os.path.join(self.temp_dir, str(uuid.uuid4()) + '.onnx')
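# Packs several tensors into one file, padding each to a 4096-byte
# boundary so every recorded offset is page-aligned.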
def create_external_data_tensors(self, tensors_data): # type: (List[Tuple[List[Any],Any]]) -> List[TensorProto]
tensor_filename = "tensors.bin"
tensors = []
with open(os.path.join(self.temp_dir, tensor_filename), 'ab') as data_file:
for (value, tensor_name) in tensors_data:
tensor = from_array(np.array(value))
offset = data_file.tell()
if offset % 4096 != 0:
data_file.write(b"\0" * (4096 - offset % 4096))
offset = offset + 4096 - offset % 4096
data_file.write(tensor.raw_data)
set_external_data(tensor, location=tensor_filename, offset=offset, length=data_file.tell() - offset)
tensor.name = tensor_name
tensor.ClearField("raw_data")
tensor.data_location = onnx.TensorProto.EXTERNAL
tensors.append(tensor)
return tensors
def create_test_model(self): # type: () -> Text
tensors = self.create_external_data_tensors([
(self.attribute_value, "attribute_value"),
(self.initializer_value, "input_value"),
])
constant_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['values'],
value=tensors[0]
)
inputs = [helper.make_tensor_value_info("input_value",
onnx.TensorProto.FLOAT,
self.initializer_value.shape)]
graph = helper.make_graph([constant_node], "test_graph",
inputs=inputs, outputs=[],
initializer=[tensors[1]])
model = helper.make_model(graph)
model_filename = os.path.join(self.temp_dir, 'model.onnx')
with open(model_filename, "wb") as model_file:
model_file.write(model.SerializeToString())
return model_filename
def test_check_model(self): # type: () -> None
checker.check_model(self.model_filename)
def test_load_external_single_file_data(self): # type: () -> None
model = onnx.load_model(self.model_filename)
initializer_tensor = model.graph.initializer[0]
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = model.graph.node[0].attribute[0].t
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
def test_save_external_single_file_data(self): # type: () -> None
model = onnx.load_model(self.model_filename)
temp_dir = os.path.join(self.temp_dir, "save_copy")
os.mkdir(temp_dir)
new_model_filename = os.path.join(temp_dir, 'model.onnx')
onnx.save_model(model, new_model_filename)
new_model = onnx.load_model(new_model_filename)
initializer_tensor = new_model.graph.initializer[0]
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = new_model.graph.node[0].attribute[0].t
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
class TestSaveAllTensorsAsExternalData(unittest.TestCase):
def setUp(self): # type: () -> None
self.temp_dir = tempfile.mkdtemp() # type: Text
self.initializer_value = np.arange(6).reshape(3, 2).astype(np.float32) + 512
self.attribute_value = np.arange(6).reshape(2, 3).astype(np.float32) + 256
self.model = self.create_test_model()
def tearDown(self): # type: () -> None
shutil.rmtree(self.temp_dir)
def get_temp_model_filename(self): # type: () -> Text
return os.path.join(self.temp_dir, str(uuid.uuid4()) + '.onnx')
def create_data_tensors(self, tensors_data): # type: (List[Tuple[List[Any],Any]]) -> List[TensorProto]
tensors = []
for (value, tensor_name) in tensors_data:
tensor = from_array(np.array(value))
tensor.name = tensor_name
tensors.append(tensor)
return tensors
def create_test_model(self): # type: () -> ModelProto
tensors = self.create_data_tensors([
(self.attribute_value, "attribute_value"),
(self.initializer_value, "input_value"),
])
constant_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['values'],
value=tensors[0]
)
inputs = [helper.make_tensor_value_info("input_value",
onnx.TensorProto.FLOAT,
self.initializer_value.shape)]
graph = helper.make_graph([constant_node], "test_graph",
inputs=inputs, outputs=[],
initializer=[tensors[1]])
return helper.make_model(graph)
def test_check_model(self): # type: () -> None
checker.check_model(self.model)
def test_convert_model_to_from_one_file(self): # type: () -> None
model_file_path = self.get_temp_model_filename()
external_data_file = str(uuid.uuid4())
convert_model_to_external_data(self.model, location=external_data_file)
onnx.save_model(self.model, model_file_path)
self.assertTrue(Path.isfile(model_file_path))
self.assertTrue(Path.isfile(os.path.join(self.temp_dir, external_data_file)))
model = onnx.load_model(model_file_path)
initializer_tensor = model.graph.initializer[0]
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = model.graph.node[0].attribute[0].t
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
# test convert model from external data
convert_model_from_external_data(model)
model_file_path = self.get_temp_model_filename()
onnx.save_model(model, model_file_path)
model = onnx.load_model(model_file_path)
initializer_tensor = model.graph.initializer[0]
self.assertFalse(len(initializer_tensor.external_data))
self.assertEqual(initializer_tensor.data_location, TensorProto.DEFAULT)
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = model.graph.node[0].attribute[0].t
self.assertFalse(len(initializer_tensor.external_data))
self.assertEqual(attribute_tensor.data_location, TensorProto.DEFAULT)
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
def test_convert_model_to_external_data_one_file_per_tensor(self): # type: () -> None
model_file_path = self.get_temp_model_filename()
convert_model_to_external_data(self.model, all_tensors_to_one_file=False)
onnx.save_model(self.model, model_file_path)
        self.assertTrue(os.path.isfile(model_file_path))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "input_value")))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "attribute_value")))
model = onnx.load_model(model_file_path)
initializer_tensor = model.graph.initializer[0]
self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
attribute_tensor = model.graph.node[0].attribute[0].t
self.assertTrue(np.allclose(to_array(attribute_tensor), self.attribute_value))
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import checker, helper, ModelProto, TensorProto, GraphProto, NodeProto, OperatorSetIdProto
from typing import Sequence, Text, Tuple, List, Callable
from onnx import numpy_helper
import numpy as np # type: ignore
import struct
import onnx.version_converter
import unittest
class TestVersionConverter(unittest.TestCase):
def _converted(
self,
graph, # type: GraphProto
initial_version, # type: OperatorSetIdProto
target_version # type: int
): # type: (...) -> ModelProto
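        # Wrap the graph in a model pinned to the initial opset, convert it to
        # the target opset, and validate the result with the checker.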
orig_model = helper.make_model(graph, producer_name='onnx-test', opset_imports=[initial_version])
converted_model = onnx.version_converter.convert_version(orig_model,
target_version)
checker.check_model(converted_model)
return converted_model
# Test 1: Backwards Incompatible Conversion: Reshape: 8 -> 2
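    # (The 'shape' input is produced at runtime by the Add node, so it cannot
    # be folded into the static 'shape' attribute that Reshape required before
    # opset 5; the converter is expected to raise.)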
def test_backwards_incompatible(self): # type: () -> None
def test(): # type: () -> None
nodes = [helper.make_node('Add', ["W", "Z"], ["shape"]),
helper.make_node('Reshape', ["X", "shape"], ["A"]),
helper.make_node('Add', ["A", "W"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
self._converted(graph, helper.make_operatorsetid("", 8), 2)
self.assertRaises(RuntimeError, test)
# Test 2: Backwards Compatible Conversion (No Adaptations): Add: 3 -> 2
def test_backwards_compatible(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 3), 2)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Add"
assert converted_model.opset_import[0].version == 2
# Test 3: Non-Existent Op Conversion: Cos: 8 -> 6
def test_non_existent_op(self): # type: () -> None
def test(): # type: () -> None
nodes = [helper.make_node('Cos', ["X"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
self._converted(graph, helper.make_operatorsetid("", 8), 6)
self.assertRaises(RuntimeError, test)
# Test Add Adapter: 8 -> 5
def test_add_8_5(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Add"
assert converted_model.opset_import[0].version == 5
# Test Add Adapter: 5 -> 8
def test_add_5_8(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Add"
assert converted_model.opset_import[0].version == 8
# Test Add Adapter: 5 -> 8, requiring insertion of an Unsqueeze node
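    # (Opset 7 moved Add to numpy-style broadcasting, which aligns trailing
    # dimensions; to preserve the legacy axis=0 alignment of X2's (5,) shape
    # against X1's (5, 2), the adapter inserts an Unsqueeze that reshapes X2
    # to (5, 1).)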
def test_add_5_8_with_unsqueeze(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"], axis=0, broadcast=1)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5, 2)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Unsqueeze"
assert converted_model.graph.node[1].op_type == "Add"
assert converted_model.opset_import[0].version == 8
# Test Mul Adapter: 8 -> 5
def test_mul_8_5(self): # type: () -> None
nodes = [helper.make_node('Mul', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Mul"
assert converted_model.opset_import[0].version == 5
# Test Mul Adapter: 5 -> 8
def test_mul_5_8(self): # type: () -> None
nodes = [helper.make_node('Mul', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Mul"
assert converted_model.opset_import[0].version == 8
# Test Gemm Adapter: 1 -> 8
def test_gemm_up(self): # type: () -> None
nodes = [helper.make_node('Gemm', ["A", "B", "C"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Gemm"
assert converted_model.opset_import[0].version == 8
# Test Gemm Adapter: 8 -> 1
def test_gemm_down(self): # type: () -> None
nodes = [helper.make_node('Gemm', ["A", "B", "C"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Gemm"
assert converted_model.opset_import[0].version == 1
# Test Relu Adapter: 5 -> 7
def test_relu_5_7(self): # type: () -> None
nodes = [helper.make_node('Relu', ["X"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 7)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Relu"
assert converted_model.opset_import[0].version == 7
# Test Relu Adapter: 7 -> 5
def test_relu_7_5(self): # type: () -> None
nodes = [helper.make_node('Relu', ["X"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 7), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Relu"
assert converted_model.opset_import[0].version == 5
# Test BatchNormalization Adapter: 8 -> 5
def test_batch_normalization_8_5(self): # type: () -> None
nodes = [helper.make_node('BatchNormalization', ["X", "scale", "B",
"mean", "var"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("var", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == 5
# Test BatchNormalization Adapter: 5 -> 8
def test_batch_normalization_5_8(self): # type: () -> None
nodes = [helper.make_node('BatchNormalization', ["X", "scale", "B",
"mean", "var"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("var", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == 8
# Test Concat Adapter: 3 -> 5
def test_concat_3_5(self): # type: () -> None
nodes = [helper.make_node('Concat', ["X1", "X2", "X3",
"X4", "X5"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X3", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X4", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X5", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 3), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Concat"
assert converted_model.opset_import[0].version == 5
# Test Concat Adapter: 5 -> 3
def test_concat_5_3(self): # type: () -> None
nodes = [helper.make_node('Concat', ["X1", "X2", "X3",
"X4", "X5"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X3", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X4", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X5", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 3)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Concat"
assert converted_model.opset_import[0].version == 3
# Test Reshape Adapter: 6 -> 4
def test_reshape_6_4(self): # type: () -> None
nodes = [helper.make_node('Constant', [], ["shape"],
value=helper.make_tensor("", TensorProto.INT64, [1],
[5])),
helper.make_node('Reshape', ["X", "shape"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 6), 4)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Reshape"
assert converted_model.opset_import[0].version == 4
# Test Reshape Adapter: 4 -> 6
def test_reshape_4_6(self): # type: () -> None
nodes = [helper.make_node('Reshape', ["X"], ["Y"], shape=[5])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 4), 6)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Reshape"
assert converted_model.opset_import[0].version == 6
# Test Sum Adapter: 7 -> 8
def test_sum_7_8(self): # type: () -> None
nodes = [helper.make_node('Sum', ["data_0", "data_1", "data_2",
"data_3", "data_4"], ["sum"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_2", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_3", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_4", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("sum", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 7), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Sum"
assert converted_model.opset_import[0].version == 8
    # Test Sum Adapter: 5 -> 7
    def test_sum_5_7(self): # type: () -> None
nodes = [helper.make_node('Sum', ["data_0", "data_1", "data_2",
"data_3", "data_4"], ["sum"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_2", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_3", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_4", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("sum", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 7)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Sum"
assert converted_model.opset_import[0].version == 7
# Test Sum Adapter: 8 -> 5
def test_sum_8_5(self): # type: () -> None
nodes = [helper.make_node('Sum', ["data_0", "data_1", "data_2",
"data_3", "data_4"], ["sum"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_2", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_3", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_4", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("sum", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Sum"
assert converted_model.opset_import[0].version == 5
# Test AveragePool Adapter: 1 -> 8
def test_averagepool_up(self): # type: () -> None
nodes = [helper.make_node('AveragePool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "AveragePool"
assert converted_model.opset_import[0].version == 8
# Test AveragePool Adapter: 8 -> 1
def test_averagepool_down(self): # type: () -> None
nodes = [helper.make_node('AveragePool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "AveragePool"
assert converted_model.opset_import[0].version == 1
# Test Dropout Adapter: 1 -> 8
def test_dropout_up(self): # type: () -> None
nodes = [helper.make_node('Dropout', ["data"], ["output"], is_test=1)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("output", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Dropout"
assert converted_model.opset_import[0].version == 8
# Test Dropout Adapter: 8 -> 1
def test_dropout_down(self): # type: () -> None
nodes = [helper.make_node('Dropout', ["data"], ["output"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("output", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Dropout"
assert converted_model.opset_import[0].version == 1
# Test Max Adapter: 7 -> 8
def test_max_7_8(self): # type: () -> None
from_opset = 7
to_opset = 8
data_type = TensorProto.FLOAT
data_shape = (2, 3, 4)
nodes = [onnx.helper.make_node(
"Max",
inputs=["X"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_max",
[onnx.helper.make_tensor_value_info("X", data_type, data_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, data_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Max"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Min Adapter: 7 -> 8
def test_min_7_8(self): # type: () -> None
from_opset = 7
to_opset = 8
data_type = TensorProto.FLOAT
data_shape = (2, 3, 4)
nodes = [onnx.helper.make_node(
"Min",
inputs=["X"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_min",
[onnx.helper.make_tensor_value_info("X", data_type, data_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, data_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Min"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Mean Adapter: 7 -> 8
def test_mean_7_8(self): # type: () -> None
from_opset = 7
to_opset = 8
data_type = TensorProto.FLOAT
data_shape = (3,)
nodes = [onnx.helper.make_node(
"Mean",
inputs=["X"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_mean",
[onnx.helper.make_tensor_value_info("X", data_type, data_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, data_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Mean"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test MaxPool Adapter: 1 -> 8
def test_maxpool_up(self): # type: () -> None
nodes = [helper.make_node('MaxPool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "MaxPool"
assert converted_model.opset_import[0].version == 8
# Test MaxPool Adapter: 8 -> 1
def test_maxpool_down(self): # type: () -> None
nodes = [helper.make_node('MaxPool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "MaxPool"
assert converted_model.opset_import[0].version == 1
# Test BatchNormalization Adapter: 8 -> 9
def test_batch_normalization_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [helper.make_node(
'BatchNormalization',
inputs=["x", "s", "bias", "mean", "var"],
outputs=["y"]
)]
input_shape = (1, 2, 1, 3)
x = helper.make_tensor_value_info("x", data_type, input_shape)
scale = helper.make_tensor_value_info("s", data_type, [input_shape[1]])
B = helper.make_tensor_value_info("bias", data_type, [input_shape[1]])
mean = helper.make_tensor_value_info("mean", data_type, [input_shape[1]])
var = helper.make_tensor_value_info("var", data_type, [input_shape[1]])
y = helper.make_tensor_value_info("y", data_type, input_shape)
graph = helper.make_graph(
nodes,
"test_batchnormalization_8_9",
[x, scale, B, mean, var],
[y]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == to_opset
# Test BatchNormalization Adapter: 9 -> 8
def test_batchnormalization_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
'BatchNormalization',
inputs=['X', 'scale', 'B', 'mean', 'var'],
outputs=['Y'],
)]
input_shape = (2, 3, 4, 5)
x = onnx.helper.make_tensor_value_info("X", data_type, input_shape)
scale = onnx.helper.make_tensor_value_info("scale", data_type, [input_shape[1]])
B = onnx.helper.make_tensor_value_info("B", data_type, [input_shape[1]])
mean = onnx.helper.make_tensor_value_info("mean", data_type, [input_shape[1]])
var = onnx.helper.make_tensor_value_info("var", data_type, [input_shape[1]])
y = onnx.helper.make_tensor_value_info("Y", data_type, input_shape)
graph = onnx.helper.make_graph(
nodes, "test_batchnormalization", [x, scale, B, mean, var], [y]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == to_opset
# Test Constant Adapter: 8 -> 9
def test_constant_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
output_shape = [2, 3, 4]
output_value = np.arange(24)
nodes = [helper.make_node(
"Constant",
inputs=[],
outputs=["Y"],
value=helper.make_tensor("", data_type, output_shape, output_value))]
graph = helper.make_graph(
nodes,
"test_constant",
[],
[onnx.helper.make_tensor_value_info("Y", data_type, output_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Constant Adapter: 9 -> 8
def test_constant_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
output_shape = [2, 3, 4]
output_value = np.arange(24)
nodes = [helper.make_node(
"Constant",
inputs=[],
outputs=["Y"],
value=helper.make_tensor("", data_type, output_shape, output_value))]
graph = helper.make_graph(
nodes,
"test_constant",
[],
[onnx.helper.make_tensor_value_info("Y", data_type, output_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Flatten Adapter: 8 -> 9
def test_flatten_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Flatten",
inputs=["X"],
outputs=["Y"],
axis=1
)]
graph = helper.make_graph(
nodes,
"test_flatten",
[onnx.helper.make_tensor_value_info("X", data_type, [2, 3, 4])],
[onnx.helper.make_tensor_value_info("Y", data_type, [2, 12])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Flatten"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Flatten Adapter: 9 -> 8
def test_flatten_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Flatten",
inputs=["X"],
outputs=["Y"],
axis=1
)]
graph = helper.make_graph(
nodes,
"test_flatten",
[onnx.helper.make_tensor_value_info("X", data_type, [2, 3, 4])],
[onnx.helper.make_tensor_value_info("Y", data_type, [2, 12])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[1].op_type == "Flatten"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test PRelu Adapter: 8 -> 9
def test_prelu_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"PRelu",
inputs=["X", "Slope"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_prelu",
[onnx.helper.make_tensor_value_info("X", data_type, input_shape),
onnx.helper.make_tensor_value_info("Slope", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "PRelu"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test PRelu Adapter: 9 -> 8
def test_prelu_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"PRelu",
inputs=["X", "Slope"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_prelu",
[onnx.helper.make_tensor_value_info("X", data_type, input_shape),
onnx.helper.make_tensor_value_info("Slope", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "PRelu"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Greater Adapter: 8 -> 9
def test_greater_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Greater",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_greater",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Greater"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test Greater Adapter: 9 -> 8
def test_greater_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Greater",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_greater",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "Greater"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test Less Adapter: 8 -> 9
def test_less_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Less",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_less",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Less"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test Less Adapter: 9 -> 8
def test_less_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Less",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_less",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "Less"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test MatMul Adapter: 8 -> 9
def test_matmul_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"MatMul",
inputs=["X1", "X2"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_matmul",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "MatMul"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test MatMul Adapter: 9 -> 8
def test_matmul_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"MatMul",
inputs=["X1", "X2"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_matmul",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "MatMul"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Gemm Adapter: 8 -> 9
def test_gemm_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Gemm",
inputs=["X1", "X2", "X3"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_gemm",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3]),
onnx.helper.make_tensor_value_info("X3", data_type, [3, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Gemm"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Gemm Adapter: 9 -> 8
def test_gemm_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Gemm",
inputs=["X1", "X2", "X3"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_gemm",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3]),
onnx.helper.make_tensor_value_info("X3", data_type, [3, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[3].op_type == "Gemm"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Upsample Adapter: 8 -> 9
def test_upsample_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Upsample",
inputs=["X"],
outputs=["Y"],
mode="nearest",
scales=[1.0, 1.0, 2.0, 3.0],
)]
graph = helper.make_graph(
nodes,
"test_upsample_8_9",
[onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2])],
[onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert len(converted_model.graph.node) == 1
assert converted_model.graph.node[0].op_type == "Upsample"
assert len(converted_model.graph.node[0].attribute) == 1
assert converted_model.graph.node[0].attribute[0].name == "mode"
assert converted_model.opset_import[0].version == to_opset
# Test Helper for Upsample Adapter: 9 -> 8
def helper_upsample_with_initializer(self, raw_scale=False): # type: (bool) -> None
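        # Downgrading Upsample from 9 to 8 moves the 'scales' input (supplied
        # here as an initializer) into a node attribute, so the converted
        # graph should end up with no initializers.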
from_opset = 9
to_opset = 8
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Upsample",
inputs=["X", "Scales"],
outputs=["Y"],
mode="nearest"
)]
scale_value = [1.0, 1.0, 2.0, 3.0]
scale_tensor = onnx.helper.make_tensor("Scales", onnx.TensorProto.FLOAT, [4], bytes(struct.pack("4f", *scale_value)) if raw_scale else scale_value, raw_scale)
graph = helper.make_graph(
nodes,
"test_upsample",
[onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2]),
onnx.helper.make_tensor_value_info("Scales", data_type, [4])],
[onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])],
[scale_tensor])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Upsample"
assert len(converted_model.graph.initializer) == 0
assert len(converted_model.graph.node[0].attribute) == 2
assert converted_model.graph.node[0].attribute[1].name == "scales"
assert converted_model.opset_import[0].version == to_opset
# Test Helper for Upsample Adapter: 9 -> 8
def helper_upsample_with_constant(self, raw_scale=False): # type: (bool) -> None
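        # Here 'scales' is produced by a Constant node instead; the adapter
        # folds it into the Upsample's 'scales' attribute and removes the
        # Constant, leaving a single node.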
from_opset = 9
to_opset = 8
data_type = TensorProto.FLOAT
scale_value = [1.0, 1.0, 2.0, 3.0]
scale_tensor = onnx.helper.make_tensor("const_value", onnx.TensorProto.FLOAT, [4], bytes(struct.pack("4f", *scale_value)) if raw_scale else scale_value, raw_scale)
nodes = [
onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['Constant_Output'],
value=scale_tensor),
onnx.helper.make_node(
"Upsample",
inputs=["X", "Constant_Output"],
outputs=["Y"],
mode="nearest")]
graph = helper.make_graph(
nodes,
"test_upsample",
[onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2])],
[onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])],
value_info=[onnx.helper.make_tensor_value_info("Constant_Output", data_type, [4])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert len(converted_model.graph.node) == 1
assert converted_model.graph.node[0].op_type == "Upsample"
assert len(converted_model.graph.node[0].attribute) == 2
assert converted_model.graph.node[0].attribute[1].name == "scales"
assert converted_model.opset_import[0].version == to_opset
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_constant_node_9_8(self): # type: () -> None
self.helper_upsample_with_constant(raw_scale=False)
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_initializer_9_8(self): # type: () -> None
self.helper_upsample_with_initializer(raw_scale=False)
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_raw_initializer_9_8(self): # type: () -> None
        self.helper_upsample_with_initializer(raw_scale=True)
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_raw_constant_node_9_8(self): # type: () -> None
self.helper_upsample_with_constant(raw_scale=True)
# Test Scan Adapter: 8 -> 9
def test_scan_8_9(self): # type: () -> None
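        # Scan-8 carries an optional sequence_lens first input (omitted here
        # by passing the empty string) and an explicit batch dimension on its
        # tensors; both were removed in Scan-9, which the adapter accounts for.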
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
node1 = onnx.helper.make_node("Add", inputs=["sum_in", "next"], outputs=["sum_out"],)
node2 = onnx.helper.make_node("Identity", inputs=["sum_out"], outputs=["scan_out"],)
g = onnx.helper.make_graph(
[node1, node2],
"scan_body",
[onnx.helper.make_tensor_value_info("sum_in", data_type, [2]),
onnx.helper.make_tensor_value_info("next", data_type, [2])],
[onnx.helper.make_tensor_value_info("sum_out", data_type, [2]),
onnx.helper.make_tensor_value_info("scan_out", data_type, [2])]
)
nodes = [onnx.helper.make_node(
"Scan",
inputs=["", "initial", "x"],
outputs=["y", "z"],
body=g,
num_scan_inputs=1,
)]
seq_lens = onnx.helper.make_empty_tensor_value_info(" ")
initial = onnx.helper.make_tensor_value_info("initial", data_type, [1, 2])
x = onnx.helper.make_tensor_value_info("x", data_type, [1, 3, 2])
y = onnx.helper.make_tensor_value_info("y", data_type, [1, 2])
z = onnx.helper.make_tensor_value_info("z", data_type, [1, 3, 2])
graph = onnx.helper.make_graph(
nodes, "test_scan_8_9", [seq_lens, initial, x], [y, z]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Scan"
assert converted_model.opset_import[0].version == to_opset
# Test Cast Adapter: 8 -> 9
def test_cast_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type_from = TensorProto.FLOAT
data_type_to = TensorProto.UINT32
nodes = [onnx.helper.make_node(
"Cast",
inputs=["X"],
outputs=["Y"],
to=TensorProto.UINT32
)]
graph = helper.make_graph(
nodes,
"test_cast",
[onnx.helper.make_tensor_value_info("X", data_type_from, [2, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type_to, [2, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Cast"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type_to
assert converted_model.opset_import[0].version == to_opset
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import os
import platform
import unittest
import onnx.backend.base
import onnx.backend.test
from onnx.backend.base import Device, DeviceType
from onnx.backend.test.runner import BackendIsNotSupposedToImplementIt
import onnx.shape_inference
import onnx.version_converter
from typing import Optional, Text, Any, Tuple, Sequence
from onnx import NodeProto, ModelProto, TensorProto
import numpy # type: ignore
# The following just executes the fake backend through the backend test
# infrastructure. Since we don't have a full reference implementation of all
# ops in the ONNX repo, it's impossible to produce the proper results. However,
# we can run the checker (that's what the base Backend class does) to verify
# that all tests fed in are actually well-formed ONNX models.
#
# If everything is fine, all the tests will be marked as "skipped".
#
# We don't enable the report in this test because the report collection logic
# itself fails when models are malformed.
class DummyBackend(onnx.backend.base.Backend):
@classmethod
def prepare(cls,
model, # type: ModelProto
device='CPU', # type: Text
**kwargs # type: Any
): # type: (...) -> Optional[onnx.backend.base.BackendRep]
super(DummyBackend, cls).prepare(model, device, **kwargs)
# test shape inference
model = onnx.shape_inference.infer_shapes(model)
value_infos = {vi.name: vi for vi in itertools.chain(model.graph.value_info, model.graph.output)}
if do_enforce_test_coverage_whitelist(model):
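            # For whitelisted models, every node output must have an inferred
            # value_info with a concrete element type and fully static shape.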
for node in model.graph.node:
for i, output in enumerate(node.output):
if node.op_type == 'Dropout' and i != 0:
continue
assert output in value_infos
tt = value_infos[output].type.tensor_type
assert tt.elem_type != TensorProto.UNDEFINED
for dim in tt.shape.dim:
assert dim.WhichOneof('value') == 'dim_value'
raise BackendIsNotSupposedToImplementIt(
"This is the dummy backend test that doesn't verify the results but does run the checker")
@classmethod
def run_node(cls,
node, # type: NodeProto
inputs, # type: Any
device='CPU', # type: Text
outputs_info=None, # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]
**kwargs # type: Any
): # type: (...) -> Optional[Tuple[Any, ...]]
super(DummyBackend, cls).run_node(node, inputs, device=device, outputs_info=outputs_info)
raise BackendIsNotSupposedToImplementIt(
"This is the dummy backend test that doesn't verify the results but does run the checker")
@classmethod
def supports_device(cls, device): # type: (Text) -> bool
d = Device(device)
if d.type == DeviceType.CPU:
return True
return False
test_coverage_whitelist = set(
['bvlc_alexnet', 'densenet121', 'inception_v1', 'inception_v2',
'resnet50', 'shufflenet', 'SingleRelu', 'squeezenet_old', 'vgg19', 'zfnet'])
def do_enforce_test_coverage_whitelist(model): # type: (ModelProto) -> bool
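    # Full shape/type coverage is only enforced for the known real models in
    # the whitelist, and never for models containing recurrent ops, whose
    # output shapes cannot be fully inferred.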
if model.graph.name not in test_coverage_whitelist:
return False
for node in model.graph.node:
if node.op_type in set(['RNN', 'LSTM', 'GRU']):
return False
return True
backend_test = onnx.backend.test.BackendTest(DummyBackend, __name__)
if os.getenv('APPVEYOR'):
backend_test.exclude(r'(test_vgg19|test_zfnet)')
if platform.architecture()[0] == '32bit':
backend_test.exclude(r'(test_vgg19|test_zfnet|test_bvlc_alexnet)')
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test.test_cases)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import onnx
from onnx.tools import update_model_dims
from onnx import helper, TensorProto
class TestToolsFunctions(unittest.TestCase):
def test_update_inputs_outputs_dim(self): # type: () -> None
node_def = helper.make_node(
"Conv",
inputs=['x', 'W'],
outputs=['y'],
kernel_shape=[3, 3],
strides=[2, 2],
)
graph_def = helper.make_graph(
[node_def],
'test',
[helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5]),
helper.make_tensor_value_info('W', TensorProto.FLOAT, [1, 1, 3, 3])],
[helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 2, 2])]
)
model_def = helper.make_model(graph_def, producer_name='test')
updated_def = update_model_dims.update_inputs_outputs_dims(
model_def,
{
"x": [1, 1, 'x1', -1],
"W": [1, 1, 3, 3],
},
{
"y": [1, 1, -1, -1],
})
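        # Dimensions given as -1 are replaced with generated symbolic names of
        # the form '<tensor_name>_<dim_index>' (e.g. 'x_3'); string entries
        # such as 'x1' are used verbatim.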
onnx.checker.check_model(updated_def)
self.assertEqual(updated_def.graph.input[0].type.tensor_type.shape.dim[2].dim_param, 'x1')
self.assertEqual(updated_def.graph.input[0].type.tensor_type.shape.dim[3].dim_param, 'x_3')
self.assertEqual(updated_def.graph.output[0].type.tensor_type.shape.dim[2].dim_param, 'y_2')
self.assertEqual(updated_def.graph.output[0].type.tensor_type.shape.dim[3].dim_param, 'y_3')
if __name__ == '__main__':
unittest.main()
|
import unittest
from onnx import defs, helper
class TestRelu(unittest.TestCase):
def test_relu(self): # type: () -> None
self.assertTrue(defs.has('Relu'))
helper.make_node(
'Relu', ['X'], ['Y'])
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import checker, helper, TensorProto, NodeProto, GraphProto, ValueInfoProto, ModelProto, ONNX_ML, SparseTensorProto
from onnx.helper import make_node, make_tensor, make_tensor_value_info, make_empty_tensor_value_info, make_opsetid, make_sequence_value_info
from typing import Sequence, Union, Text, Tuple, List, Any, Optional
import onnx.shape_inference
import unittest
import os
import numpy as np # type: ignore
class TestShapeInference(unittest.TestCase):
def _make_graph(self,
seed_values, # type: Sequence[Union[Text, Tuple[Text, TensorProto.DataType, Any]]]
nodes, # type: List[NodeProto]
value_info, # type: List[ValueInfoProto]
initializer=None # type: Optional[Sequence[TensorProto]]
): # type: (...) -> GraphProto
if initializer is None:
initializer = []
names_in_initializer = set(x.name for x in initializer)
input_value_infos = []
        # If the starting values are not also initializers, introduce them as
        # the output of a Reshape node so that their sizes are guaranteed to
        # be unknown.
for seed_value in seed_values:
if isinstance(seed_value, tuple):
seed_name = seed_value[0]
seed_value_info = make_tensor_value_info(*seed_value)
else:
seed_name = seed_value
seed_value_info = make_empty_tensor_value_info(seed_value)
if seed_name in names_in_initializer:
input_value_infos.append(seed_value_info)
else:
value_info.append(seed_value_info)
input_value_infos.append(make_tensor_value_info('SEED_' + seed_name, TensorProto.UNDEFINED, ()))
input_value_infos.append(make_tensor_value_info('UNKNOWN_SHAPE_' + seed_name, TensorProto.UNDEFINED, ()))
nodes[:0] = [make_node("Reshape", ['SEED_' + seed_name, 'UNKNOWN_SHAPE_' + seed_name], [seed_name])]
return helper.make_graph(nodes, "test", input_value_infos, [], initializer=initializer, value_info=value_info)
def _inferred(self, graph, **kwargs): # type: (GraphProto, **Any) -> ModelProto
kwargs[str('producer_name')] = 'onnx-test'
orig_model = helper.make_model(graph, **kwargs)
inferred_model = onnx.shape_inference.infer_shapes(orig_model)
checker.check_model(inferred_model)
return inferred_model
def _assert_inferred(self, graph, vis, **kwargs): # type: (GraphProto, List[ValueInfoProto], **Any) -> None
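        # Merge the expected value_infos with any pre-existing ones on the
        # graph, run shape inference, and compare the resulting lists sorted
        # by name.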
names_in_vis = set(x.name for x in vis)
vis = list(x for x in graph.value_info if x.name not in names_in_vis) + vis
inferred_model = self._inferred(graph, **kwargs)
inferred_vis = list(inferred_model.graph.value_info)
vis = list(sorted(vis, key=lambda x: x.name))
inferred_vis = list(sorted(inferred_vis, key=lambda x: x.name))
if vis == inferred_vis:
return
# otherwise some custom logic to give a nicer diff
vis_names = set(x.name for x in vis)
inferred_vis_names = set(x.name for x in inferred_vis)
assert vis_names == inferred_vis_names, (vis_names, inferred_vis_names)
for vi, inferred_vi in zip(vis, inferred_vis):
assert vi == inferred_vi, '\n%s\n%s\n' % (vi, inferred_vi)
assert False
def test_empty_graph(self): # type: () -> None
graph = self._make_graph(
['y'],
[], [])
self._assert_inferred(graph, [])
def _identity_prop(self, op, **kwargs): # type: (Text, **Any) -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5))],
[make_node(op, 'x', 'y', **kwargs)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (30, 4, 5))])
def test_transpose(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))])
def test_transpose_preexisting(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.FLOAT, None)])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))])
def test_transpose_partial(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.UNDEFINED, (3, "a", "b"))]) # type: ignore
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))])
def test_transpose_preexisting_incorrect_shape(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5, 5))])
self.assertRaises(RuntimeError, self._inferred, graph)
def test_transpose_preexisting_incorrect_type(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.STRING, (3, 2, 4))])
self.assertRaises(RuntimeError, self._inferred, graph)
def _make_matmul_test_all_dims_known(self, shape1, shape2): # type: (Sequence[int], Sequence[int]) -> None
        expected_out_shape = np.matmul(np.arange(np.prod(shape1)).reshape(shape1),
                                       np.arange(np.prod(shape2)).reshape(shape2)).shape
graph = self._make_graph(
[('x', TensorProto.FLOAT, shape1),
('y', TensorProto.FLOAT, shape2)],
[make_node('MatMul', ['x', 'y'], ['z'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, expected_out_shape)])
def test_matmul_all_dims_known(self): # type: () -> None
self._make_matmul_test_all_dims_known((2,), (2,))
self._make_matmul_test_all_dims_known((4, 2), (2, 4))
self._make_matmul_test_all_dims_known((5, 2), (2, 4))
self._make_matmul_test_all_dims_known((5, 2), (2, 1))
self._make_matmul_test_all_dims_known((1, 2), (2, 3))
self._make_matmul_test_all_dims_known((2,), (2, 3))
self._make_matmul_test_all_dims_known((4, 2), (2,))
self._make_matmul_test_all_dims_known((1, 4, 2), (3, 2, 3))
self._make_matmul_test_all_dims_known((3, 4, 2), (3, 2, 3))
self._make_matmul_test_all_dims_known((5, 1, 4, 2), (1, 3, 2, 3))
self._make_matmul_test_all_dims_known((4, 2), (3, 2, 3))
def _make_matmul_test_allow_unknown(self, shape1, shape2, expected_out_shape): # type: (Any, Any, Any) -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, shape1),
('y', TensorProto.FLOAT, shape2)],
[make_node('MatMul', ['x', 'y'], ['z'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, expected_out_shape)])
def test_matmul_allow_unknown(self): # type: () -> None
self._make_matmul_test_allow_unknown((None,), (None,), ())
self._make_matmul_test_allow_unknown((3,), (None,), ())
self._make_matmul_test_allow_unknown((2,), (2, "a"), ("a",))
self._make_matmul_test_allow_unknown((4, 2), (2, "a"), (4, "a"))
self._make_matmul_test_allow_unknown((4, None), (2, "a"), (4, "a"))
self._make_matmul_test_allow_unknown((4, None), (None, "a"), (4, "a"))
self._make_matmul_test_allow_unknown((1, 4, 2), ("a", 2, 5), ("a", 4, 5))
self._make_matmul_test_allow_unknown((1, 3, 4, 2), ("a", 2, 5), (1, 3, 4, 5))
self._make_matmul_test_allow_unknown((3,), None, None)
self._make_matmul_test_allow_unknown(None, None, None)
def test_cast(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 4, 3))],
[make_node("Cast", ["x"], ["y"], to=TensorProto.UINT8)],
[])
self._assert_inferred(graph, [make_tensor_value_info("y", TensorProto.UINT8, (2, 4, 3))])
def test_concat(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 4, 3)),
("y", TensorProto.FLOAT, (7, 4, 3))],
[make_node("Concat", ['x', 'y'], ['z'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (9, 4, 3))])
def test_concat_missing_shape(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 4, 3)),
"y",
("z", TensorProto.FLOAT, (None, None, None))],
[make_node("Concat", ['x', 'y', 'z'], ['out'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, None)])
def test_concat_3d_axis_2(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('y', TensorProto.FLOAT, (2, 2, 2))],
[make_node('Concat', ['x', 'y'], ['z'], axis=2)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 2, 4))])
def test_concat_param(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, ("a", 2)),
("y", TensorProto.FLOAT, ("a", 3))],
[make_node("Concat", ['x', 'y'], ['z'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ("a", 5))])
def test_concat_param_single_input(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, ("a", 2))],
[make_node("Concat", ['x'], ['z'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ("a", 2))])
def test_reshape_dynamic_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3)),
('shape', TensorProto.UNDEFINED, (2,))],
[make_node("Reshape", ['x', 'shape'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, None)])
def test_reshape_static_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3)),
('shape', TensorProto.INT64, (2,))],
[make_node("Reshape", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (2,), (3, 8))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (3, 8))])
def test_reshape_static_shape_inferred(self): # type: () -> None
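        # In Reshape, a 0 copies the corresponding input dimension and -1 is
        # inferred from the remaining element count, so (0, 3, -1) applied to
        # a (2, 4, 3) input yields (2, 3, 4).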
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3)),
('shape', TensorProto.INT64, (3,))],
[make_node("Reshape", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (3,), (0, 3, -1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (2, 3, 4))])
def test_reshape_static_shape_constant(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3))],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (2,), (3, 8))),
make_node("Reshape", ['x', 'shape'], ['y'])],
[])
self._assert_inferred(graph, [
make_tensor_value_info('shape', TensorProto.INT64, (2,)),
make_tensor_value_info('y', TensorProto.UINT8, (3, 8))])
def test_upsample(self): # type: () -> None
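        # Each output dimension is floor(input_dim * scale):
        # (2*1.0, 4*1.1, 3*1.3, 5*1.9) -> (2, 4, 3, 9).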
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Upsample", ['x', 'scales'], ['y'])],
[],
initializer=[make_tensor('scales', TensorProto.FLOAT, (4,), (1.0, 1.1, 1.3, 1.9))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 4, 3, 9))],
opset_imports=[helper.make_opsetid("", 9)])
def test_upsample_raw_data(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Upsample", ['x', 'scales'], ['y'])],
[],
            initializer=[make_tensor('scales', TensorProto.FLOAT, (4,),
                                     vals=np.array([1.0, 1.1, 1.3, 1.9], dtype='<f4').tobytes(),
                                     raw=True)])  # feed raw little-endian bytes (the byte order the ONNX standard requires) for test purposes
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 4, 3, 9))],
opset_imports=[helper.make_opsetid("", 9)])
def test_upsample_raw_data_v7(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, (1, 3, 4, 5))],
[make_node("Upsample", ['x'], ['y'], scales=[2.0, 1.1, 2.3, 1.9])],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 3, 9, 9))],
opset_imports=[helper.make_opsetid("", 7)])
def test_expand(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, (3, 1)),
('shape', TensorProto.INT64, (3,))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (3,), (2, 1, 6))])
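# Expand broadcasts x (3, 1) against the target shape (2, 1, 6): the leading 2 comes from
# the target, 3 wins over the target's 1, and x's 1 stretches to 6, giving (2, 3, 6).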
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 3, 6))])
def test_expand_scalar_input(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, ()),
('shape', TensorProto.INT64, (2,))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (2,), (4, 8))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (4, 8))])
def test_expand_raw_data(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, (3, 1)),
('shape', TensorProto.INT64, (2,))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (2,),
vals=np.array([3, 4], dtype='<i8').tobytes(), raw=True)])  # feed raw bytes (little-endian, as the ONNX standard requires) for test purposes
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (3, 4))])
def test_resize_size(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('roi', TensorProto.FLOAT, (8,)),
('scales', TensorProto.FLOAT, (4,)),
('sizes', TensorProto.INT64, (4,))],
[make_node("Resize", ['x', 'roi', 'scales', 'sizes'], ['y'])],
[],
initializer=[make_tensor('sizes', TensorProto.INT64, (4,), (3, 5, 6, 7))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (3, 5, 6, 7))])
def test_resize_scale(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('roi', TensorProto.FLOAT, (8,)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Resize", ['x', 'roi', 'scales'], ['y'])],
[],
initializer=[make_tensor('scales', TensorProto.FLOAT, (4,), (1.0, 1.1, 1.3, 1.9))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 4, 3, 9))])
def test_resize_scale_raw_data(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT32, (1, 3, 4, 5)),
('roi', TensorProto.FLOAT, (8,)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Resize", ['x', 'roi', 'scales'], ['y'])],
[],
initializer=[make_tensor('scales', TensorProto.FLOAT, (4,),
vals=np.array([2.0, 1.1, 2.3, 1.9], dtype='<f4').tobytes(), raw=True)])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 3, 9, 9))])
def test_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Shape", ['x'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (3,))])
def test_size(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Size", ['x'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, ())])
def test_gather(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 3)),
('i', TensorProto.INT64, (2,))],
[make_node("Gather", ['x', 'i'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3))]) # type: ignore
def test_gather_axis1(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 3, 5)),
('i', TensorProto.INT64, (1, 2))],
[make_node("Gather", ['x', 'i'], ['y'], axis=1)],
[])
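# Gather output shape = x.shape[:axis] + indices.shape + x.shape[axis + 1:]
#                     = (4,) + (1, 2) + (5,) = (4, 1, 2, 5).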
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 1, 2, 5))]) # type: ignore
def test_gather_into_scalar(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3,)),
('i', TensorProto.INT64, ())],
[make_node("Gather", ['x', 'i'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ())])
def test_gather_elements(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2)),
('i', TensorProto.INT64, (2, 2))],
[make_node("GatherElements", ['x', 'i'], ['y'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))]) # type: ignore
def test_gather_elements_axis0(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 3)),
('i', TensorProto.INT64, (2, 3))],
[make_node("GatherElements", ['x', 'i'], ['y'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3))]) # type: ignore
def test_scatter(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 3)),
('i', TensorProto.INT64, (2, 3)),
('u', TensorProto.FLOAT, (2, 3))],
[make_node("Scatter", ['x', 'i', 'u'], ['y'])],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 3))],
opset_imports=[helper.make_opsetid("", 10)]) # type: ignore
def test_scatter_axis1(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 5)),
('i', TensorProto.INT64, (1, 2)),
('u', TensorProto.FLOAT, (1, 2))],
[make_node("Scatter", ['x', 'i', 'u'], ['y'], axis=1)],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (1, 5))],
opset_imports=[helper.make_opsetid("", 10)]) # type: ignore
def test_scatter_elements(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 3)),
('i', TensorProto.INT64, (2, 3)),
('u', TensorProto.FLOAT, (2, 3))],
[make_node("ScatterElements", ['x', 'i', 'u'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, 3))]) # type: ignore
def test_scatter_elements_axis1(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 5)),
('i', TensorProto.INT64, (1, 2)),
('u', TensorProto.FLOAT, (1, 2))],
[make_node("ScatterElements", ['x', 'i', 'u'], ['y'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 5))]) # type: ignore
def test_scatternd(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('indices', TensorProto.INT64, (3, 3, 2)),
('updates', TensorProto.FLOAT, (3, 3, 6))],
[make_node("ScatterND", ['x', 'indices', 'updates'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 5, 6))]) # type: ignore
def test_scatternd_noshape(self): # type: () -> None
# The shape of 'x_reshaped' cannot be inferred, since it is the output of a dynamic reshape.
# Thus the shape of 'y' is also None.
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('indices', TensorProto.INT64, (3, 3, 2)),
('updates', TensorProto.FLOAT, (3, 3, 6)),
('shape', TensorProto.UNDEFINED, (2,))],
[make_node("Reshape", ['x', 'shape'], ['x_reshaped']),
make_node("ScatterND", ['x_reshaped', 'indices', 'updates'], ['y'])],
[])
self._assert_inferred(graph, [
make_tensor_value_info('x_reshaped', TensorProto.FLOAT, None),
make_tensor_value_info('y', TensorProto.FLOAT, None)]) # type: ignore
def test_squeeze(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 3, 1, 1, 2, 1))],
[make_node('Squeeze', 'x', 'y', axes=[0, 2, 3, 5])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, 2))])
def test_unsqueeze_regular(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2))],
[make_node('Unsqueeze', 'x', 'y', axes=[0, 1, 3, 5])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 1, 3, 1, 2, 1))])
def test_unsqueeze_unsorted_axes(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5))],
[make_node('Unsqueeze', 'x', 'y', axes=[4, 0])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 3, 4, 5, 1))])
def test_unsqueeze_negative_axes(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5))],
[make_node('Unsqueeze', 'x', 'y', axes=[0, -1])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 3, 4, 5, 1))])
def test_slice_without_input_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)), ('starts', TensorProto.INT64, (1,)), ('ends', TensorProto.INT64, (1,))],
[make_node('Slice', ['x', 'starts', 'ends'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, None)])
def test_slice_with_input_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)), ('starts', TensorProto.INT64, (2,)), ('ends', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,),
vals=np.array([1, 0], dtype='<i8').tobytes(), raw=True),  # feed raw bytes (little-endian, as the ONNX standard requires) for test purposes
make_tensor('ends', TensorProto.INT64, (2,), (2, 2))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 2))])
def test_slice_with_input_shape_containing_dim_params(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 'a', 1)),
('starts', TensorProto.INT64, (3,)),
('ends', TensorProto.INT64, (3,))],
[make_node('Slice', ['x', 'starts', 'ends'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (3,), (0, 0, 0)),
make_tensor('ends', TensorProto.INT64, (3,), (1, 1, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, None, 1))]) # type: ignore
def test_slice_with_input_shape_steps(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7)),
('starts', TensorProto.INT64, (3,)),
('ends', TensorProto.INT64, (3,)),
'axes',  # bare name: no type/shape info is provided for this input
('steps', TensorProto.INT64, (3,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (3,), (1, 0, 0)),
make_tensor('ends', TensorProto.INT64, (3,), (2, 6, 6)),
make_tensor('steps', TensorProto.INT64, (3,), (1, 4, 3))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 2, 2))])
def test_slice_with_input_shape_axes(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 6, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,)),
'steps'],  # bare name: no type/shape info is provided for this input
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (2, 2)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 2))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 6, 2))])
def test_slice_unsorted_axes(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (2, 2)),
make_tensor('axes', TensorProto.INT64, (2,), (1, 0))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 1))]) # can handle unsorted axes
def test_slice_giant_number(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (200, 22000)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1))])
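# Out-of-range starts/ends are clamped to the dimension size, so this is effectively
# x[1:3, 0:2] with shape (2, 2).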
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))])
def test_slice_giant_step(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,)),
('steps', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (200, 200)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1)),
make_tensor('steps', TensorProto.INT64, (2,), (1, 200))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 1))])
def test_slice_negative_end(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (200, -1)),  # a negative end counts back from the end of the dimension (here end = 2 - 1 = 1)
make_tensor('axes', TensorProto.INT64, (2,), (0, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 1))]) # type: ignore
def test_slice_negative_start(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, -2)),  # a negative start counts back from the end of the dimension (here start = 2 - 2 = 0)
make_tensor('ends', TensorProto.INT64, (2,), (200, 3)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))]) # type: ignore
def test_slice_negative_step(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,)),
('steps', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 4)),  # start 4 is clamped to dim - 1 = 3 because the step is negative
make_tensor('ends', TensorProto.INT64, (2,), (200, 0)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1)),
make_tensor('steps', TensorProto.INT64, (2,), (1, -1))])
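# Axis 1 runs from index 3 down to (exclusive) 0 with step -1, i.e.
# ceil((0 - 3) / -1) = 3 elements, hence the expected (2, 3).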
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3))]) # type: ignore
def test_slice_variable_copy(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, ("a", 2)),
('starts', TensorProto.INT64, (1,)),
('ends', TensorProto.INT64, (1,)),
('axes', TensorProto.INT64, (1,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (1,), (1,)),
make_tensor('ends', TensorProto.INT64, (1,), (200,)),
make_tensor('axes', TensorProto.INT64, (1,), (1,))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ("a", 1))]) # type: ignore
def test_slice_variable_input_types(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.DOUBLE, (3, 2)),
('starts', TensorProto.INT32, (2,)),
('ends', TensorProto.INT32, (2,)),
('axes', TensorProto.INT32, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT32, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT32, (2,), (200, 22000)),
make_tensor('axes', TensorProto.INT32, (2,), (0, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.DOUBLE, (2, 2))])
def test_conv(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('y', TensorProto.FLOAT, (5, 4, 2, 4, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[0, 1, 1, 0, 0, 1], dilations=[1, 2, 2], strides=[1, 1, 2])],
[])
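# Per spatial axis: out = floor((in + pad_begin + pad_end - ((k - 1) * dilation + 1)) / stride) + 1:
# floor((5 + 0 - 2) / 1) + 1 = 4, floor((6 + 1 - 7) / 1) + 1 = 1, floor((7 + 2 - 5) / 2) + 1 = 3.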
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (3, 5, 4, 1, 3))])
def test_conv_1d_simple(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (50, 4, 2))],
[make_node('Conv', ['x', 'y'], 'z', dilations=[1])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 4))])
def test_conv_dilations(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 8, 8, 8)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', dilations=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 6, 4, 2))])
def test_conv_strides(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 8, 8, 8)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', strides=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 6, 3, 2))])
def test_conv_pads(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 6, 6, 6))])
def test_conv_auto_pad(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, 4, 3, 2))],
[make_node('Conv', ['x', 'y'], 'z', auto_pad='SAME_UPPER')],
[])
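# With auto_pad=SAME_UPPER each spatial output dim is ceil(in / stride); strides default to 1,
# so the spatial dims (7, 6, 4) are preserved.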
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 7, 6, 4))])
def test_conv_auto_pad_dilation(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 65, 64, 63)),
('y', TensorProto.FLOAT, (50, 4, 4, 3, 2))],
[make_node('Conv', ['x', 'y'], 'z', auto_pad='SAME_UPPER', dilations=[2, 3, 4])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 65, 64, 63))])
def test_conv_group(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 8, 8, 8)),
('y', TensorProto.FLOAT, (4, 1, 8, 8, 8))],
[make_node('Conv', ['x', 'y'], 'z', group=4)],
[])
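# Depthwise case: with group=4 and weights (4, 1, 8, 8, 8) the output has W.shape[0] = 4
# channels, and each spatial axis gives 8 - 8 + 1 = 1.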
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 4, 1, 1, 1))])
def test_conv_only_one_pos(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (50, 4, 5))],
[make_node('Conv', ['x', 'y'], 'z', strides=[2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 1))])
def test_conv_partial_missing_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, None, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, None, 6, 6))]) # type: ignore
def test_conv_partial_missing_weight_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, None, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, None)])
def test_relu(self): # type: () -> None
self._identity_prop('Relu')
def test_add(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (30, 4, 5))],
[make_node('Add', ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 4, 5))])
def test_pow(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (30, 4, 5))],
[make_node('Pow', ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 4, 5))])
def test_bitshift(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT32, (2, 3, 1)),
('y', TensorProto.UINT32, (2, 3, 1))],
[make_node('BitShift', ['x', 'y'], 'z', direction="RIGHT")],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.UINT32, (2, 3, 1))])
def test_bitshift_broadcast_to_first(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT32, (16, 4, 1)),
('y', TensorProto.UINT32, (1,))],
[make_node('BitShift', ['x', 'y'], 'z', direction="RIGHT")],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.UINT32, (16, 4, 1))])
def test_bitshift_broadcast_to_second(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT32, (1,)),
('y', TensorProto.UINT32, (2, 3, 1))],
[make_node('BitShift', ['x', 'y'], 'z', direction="RIGHT")],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.UINT32, (2, 3, 1))])
def test_sum_single(self): # type: () -> None
self._identity_prop('Sum')
def test_sum_multi(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (30, 4, 5)),
('z', TensorProto.FLOAT, (30, 4, 5))],
[make_node('Sum', ['x', 'y', 'z'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (30, 4, 5))])
def test_sum_multi_broadcasting(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 1, 5)),
('y', TensorProto.FLOAT, ("a", 4, 1)),
('z', TensorProto.FLOAT, (4, "b"))],
[make_node('Sum', ['x', 'y', 'z'], ['out'])],
[])
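# Multidirectional broadcasting unifies (30, 1, 5), ('a', 4, 1) and (4, 'b'): 'a' is resolved
# against 30 and 'b' against 5, so the inferred output is the concrete (30, 4, 5).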
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (30, 4, 5))])
def test_sum_broadcasting_param(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, ("a", 1, 5)),
('y', TensorProto.FLOAT, ("a", 4, 1))],
[make_node('Sum', ['x', 'y'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, ("a", 4, 5))])
def test_random_normal(self): # type: () -> None
graph = self._make_graph(
[],
[make_node('RandomNormal', [], ['out'], dtype=TensorProto.DOUBLE, shape=(3, 4, 5))],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.DOUBLE, (3, 4, 5))])
def test_random_normal_like(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node('RandomNormalLike', ['X'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (2, 3, 4))])
def test_random_normal_like_with_dtype(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node('RandomNormalLike', ['X'], ['out'], dtype=TensorProto.DOUBLE,)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.DOUBLE, (2, 3, 4))])
def _logical_binary_op(self, op, input_type): # type: (Text, TensorProto.DataType) -> None
graph = self._make_graph(
[('x', input_type, (30, 4, 5)),
('y', input_type, (30, 4, 5))],
[make_node(op, ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.BOOL, (30, 4, 5))])
def _logical_binary_op_with_broadcasting(self, op, input_type): # type: (Text, TensorProto.DataType) -> None
graph = self._make_graph(
[('x', input_type, (1, 5)),
('y', input_type, (30, 4, 5))],
[make_node(op, ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.BOOL, (30, 4, 5))])
def test_logical_and(self): # type: () -> None
self._logical_binary_op('And', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('And', TensorProto.BOOL)
def test_logical_or(self): # type: () -> None
self._logical_binary_op('Or', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Or', TensorProto.BOOL)
def test_logical_xor(self): # type: () -> None
self._logical_binary_op('Xor', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Xor', TensorProto.BOOL)
def test_greater(self): # type: () -> None
self._logical_binary_op('Greater', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Greater', TensorProto.BOOL)
def test_less(self): # type: () -> None
self._logical_binary_op('Less', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Less', TensorProto.BOOL)
def test_equal(self): # type: () -> None
self._logical_binary_op('Equal', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Equal', TensorProto.BOOL)
def test_logical_not(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.BOOL, (30, 4, 5))],
[make_node('Not', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.BOOL, (30, 4, 5))])
def test_less_or_equal(self): # type: () -> None
self._logical_binary_op('LessOrEqual', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('LessOrEqual', TensorProto.BOOL)
def test_greater_or_equal(self): # type: () -> None
self._logical_binary_op('GreaterOrEqual', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('GreaterOrEqual', TensorProto.BOOL)
def test_flatten(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 5))],
[make_node('Flatten', ['x'], ['z'], axis=2)],
[])
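# Flatten collapses to 2-D as (prod(dims[:axis]), prod(dims[axis:])) = (2 * 3, 4 * 5) = (6, 20).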
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (6, 20))])
def test_flatten_default_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 5))],
[make_node('Flatten', ['x'], ['z'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 60))])
def test_flatten_zero_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 5))],
[make_node('Flatten', ['x'], ['z'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (1, 120))])
def test_flatten_unknown_dim(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 'N', 4, 5))],
[make_node('Flatten', ['x'], ['z'], axis=2)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, 20))]) # type: ignore
def test_space_to_depth(self): # type: () -> None
b = 10
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 100, 100))],
[make_node('SpaceToDepth', ['x'], ['z'], blocksize=b)],
[])
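# SpaceToDepth maps (N, C, H, W) to (N, C * b^2, H / b, W / b) = (2, 3 * 100, 10, 10).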
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 300, 10, 10))])
def test_space_to_depth_unknown_dim(self): # type: () -> None
b = 10
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 'N', 100, 100))],
[make_node('SpaceToDepth', ['x'], ['z'], blocksize=b)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, None, 10, 10))]) # type: ignore
def test_depth_to_space(self): # type: () -> None
b = 10
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 300, 10, 10))],
[make_node('DepthToSpace', ['x'], ['z'], blocksize=b, mode='DCR')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 3, 100, 100))])
def _rnn_forward(self, seqlen, batchsize, inpsize, hiddensize): # type: (int, int, int, int) -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (seqlen, batchsize, inpsize)),
('w', TensorProto.FLOAT, (1, hiddensize, inpsize)),
('r', TensorProto.FLOAT, (1, hiddensize, hiddensize))],
[make_node('RNN', ['x', 'w', 'r'], ['all', 'last'], hidden_size=hiddensize)],
[])
self._assert_inferred(graph, [
make_tensor_value_info('all', TensorProto.FLOAT, (seqlen, 1, batchsize, hiddensize)),
make_tensor_value_info('last', TensorProto.FLOAT, (1, batchsize, hiddensize))])
def test_rnn_forward(self): # type: () -> None
self._rnn_forward(64, 32, 10, 4)
def _rnn_bidirectional(self, seqlen, batchsize, inpsize, hiddensize): # type: (int, int, int, int) -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (seqlen, batchsize, inpsize)),
('w', TensorProto.FLOAT, (2, hiddensize, inpsize)),
('r', TensorProto.FLOAT, (2, hiddensize, hiddensize))],
[make_node('RNN', ['x', 'w', 'r'], ['all', 'last'], hidden_size=hiddensize,
direction="bidirectional")],
[])
self._assert_inferred(graph, [
make_tensor_value_info('all', TensorProto.FLOAT, (seqlen, 2, batchsize, hiddensize)),
make_tensor_value_info('last', TensorProto.FLOAT, (2, batchsize, hiddensize))])
def test_rnn_bidirectional(self): # type: () -> None
self._rnn_bidirectional(64, 32, 10, 4)
def _lstm_forward(self, seqlen, batchsize, inpsize, hiddensize): # type: (int, int, int, int) -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (seqlen, batchsize, inpsize)),
('w', TensorProto.FLOAT, (1, 4 * hiddensize, inpsize)),
('r', TensorProto.FLOAT, (1, 4 * hiddensize, hiddensize))],
[make_node('LSTM', ['x', 'w', 'r'], ['all', 'hidden', 'last'], hidden_size=hiddensize)],
[])
self._assert_inferred(graph, [
make_tensor_value_info('all', TensorProto.FLOAT, (seqlen, 1, batchsize, hiddensize)),
make_tensor_value_info('hidden', TensorProto.FLOAT, (1, batchsize, hiddensize)),
make_tensor_value_info('last', TensorProto.FLOAT, (1, batchsize, hiddensize))])
def test_lstm_forward(self): # type: () -> None
self._lstm_forward(64, 32, 10, 4)
def test_topk_default_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10))],
[make_node('TopK', ['x', 'k'], ['y', 'z'])],
[],
initializer=[make_tensor('k', TensorProto.INT64, (1,), (2,))])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 5, 2)),
make_tensor_value_info('z', TensorProto.INT64, (3, 4, 5, 2))])
def test_topk(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10))],
[make_node('TopK', ['x', 'k'], ['y', 'z'], axis=2)],
[],
initializer=[make_tensor('k', TensorProto.INT64, (1,), (2,))])
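# TopK keeps every dim except 'axis', which becomes k (here 2); both the values and the
# indices outputs share that shape.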
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 2, 10)),
make_tensor_value_info('z', TensorProto.INT64, (3, 4, 2, 10))])
def test_topk_raw_data(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10))],
[make_node('TopK', ['x', 'k'], ['y', 'z'], axis=2)],
[],
initializer=[make_tensor('k', TensorProto.INT64, (1,),
vals=np.array([3], dtype='<i8').tobytes(), raw=True)])  # feed raw bytes (little-endian, as the ONNX standard requires) for test purposes
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 3, 10)),
make_tensor_value_info('z', TensorProto.INT64, (3, 4, 3, 10))])
def test_topk_missing_k_value_output_rank_check(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10)),
('k', TensorProto.INT64, (1,))],
[make_node('TopK', ['x', 'k'], ['y', 'z'], axis=2)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (None, None, None, None)), # type: ignore
make_tensor_value_info('z', TensorProto.INT64, (None, None, None, None))]) # type: ignore
def test_gemm(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (7, 5)),
('y', TensorProto.FLOAT, (5, 11)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_transA(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 7)),
('y', TensorProto.FLOAT, (5, 11)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'], transA=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_transB(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (7, 5)),
('y', TensorProto.FLOAT, (11, 5)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'], transB=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_transA_and_transB(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 7)),
('y', TensorProto.FLOAT, (11, 5)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'], transA=1, transB=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_no_bias(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (13, 7)),
('y', TensorProto.FLOAT, (7, 17))],
[make_node('Gemm', ['x', 'y'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (13, 17))])
def test_reduce_op_shape_2_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', axes=(1, 2), keepdims=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (24,))])
def test_reduce_op_shape_keep_dims(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', axes=(1, 2), keepdims=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (24, 1, 1))])
def test_reduce_op_shape_default_value(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y')],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 1, 1))])
def test_reduce_op_shape_no_axes_do_not_keep_dims(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', keepdims=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, tuple())])
def test_reduce_op_shape_negative_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', axes=(-1, -2))],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (24, 1, 1))])
def test_argmax_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y', axis=1, keepdims=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (24, 1, 11))])
def test_argmax_shape_keepdims(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y', axis=0, keepdims=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (4, 11))])
def test_argmax_shape_default_value(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y')],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1, 4, 11))])
def test_argmax_shape_negative_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y', axis=-2)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (24, 1, 11))])
def test_dropout(self): # type: () -> None
graph = self._make_graph(
[('data', TensorProto.FLOAT, (3, 4, 5,)),
('ratio', TensorProto.FLOAT, ())],
[make_node('Dropout', ['data', 'ratio'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5,))])
def test_LRN(self): # type: () -> None
self._identity_prop('LRN', alpha=0.5, beta=0.5, size=1)
def test_batch_norm(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('scale', TensorProto.FLOAT, (4,)),
('b', TensorProto.FLOAT, (4,)),
('mean', TensorProto.FLOAT, (4,)),
('var', TensorProto.FLOAT, (4,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'mean', 'var'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7))])
def test_split_negative_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4))],
[make_node('Split', ['x'], ['y', 'z'], axis=-1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2)),
make_tensor_value_info('z', TensorProto.FLOAT, (2, 2))])
def test_split_with_split_attribute(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4))],
[make_node('Split', ['x'], ['y', 'z'], axis=1, split=[3, 1])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3)),
make_tensor_value_info('z', TensorProto.FLOAT, (2, 1))])
def test_split_with_split_attribute_unknown_split_dim(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 'a', 'b'))],
[make_node('Split', ['x'], ['y', 'z'], axis=1, split=[3, 1])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, None, 'b')), # type: ignore
make_tensor_value_info('z', TensorProto.FLOAT, (2, None, 'b'))]) # type: ignore
def test_split_from_GLU(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7))],
[make_node('Split', ['x'], ['y', 'z'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('z', TensorProto.FLOAT, (5, 3, 7))])
def test_GLU_partial(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7))],
[make_node('Split', ['x'], ['y', 'z'], axis=1),
make_node('Sigmoid', ['z'], ['a'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('z', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('a', TensorProto.FLOAT, (5, 3, 7))])
def test_GLU(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7))],
[make_node('Split', ['x'], ['y', 'z'], axis=1),
make_node('Sigmoid', ['z'], ['a']),
make_node('Mul', ['y', 'a'], ['b'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('z', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('a', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('b', TensorProto.FLOAT, (5, 3, 7))])
def test_softmax_2d(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('Softmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5))])
def test_softmax_3d(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('Softmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_hardmax_2d(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('Hardmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5))])
def test_hardmax_3d(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('Hardmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_logsoftmax_2d(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('LogSoftmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5))])
def test_logsoftmax_3d(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('LogSoftmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_logsoftmax_3d_negative_axis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('LogSoftmax', ['x'], 'z', axis=-1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_maxpool(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_maxpool_with_indices(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y", "Z"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3)),
make_tensor_value_info("Z", TensorProto.INT64, (5, 3, 3, 3))])
def test_maxpool_3D(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3, 3))])
def test_maxpool_with_padding(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 6, 6))])
def test_maxpool_with_padding_and_stride(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_maxpool_with_floor_mode(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (32, 288, 35, 35))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], strides=[2, 2], ceil_mode=False)],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (32, 288, 17, 17))])
def test_maxpool_with_ceil_mode(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (32, 288, 35, 35))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], strides=[2, 2], ceil_mode=True)],
[])
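# With ceil_mode the formula is ceil((35 - 2) / 2) + 1 = 18, versus floor(...) + 1 = 17
# in the floor-mode test above.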
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (32, 288, 18, 18))])
def test_maxpool_ceil(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (1, 1, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[3, 3], strides=[2, 2], ceil_mode=True)],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (1, 1, 2, 2))])
def test_maxpool_with_dilations(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], dilations=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 2, 2))])
def test_maxpool_with_same_upper_padding_and_stride(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_UPPER", kernel_shape=[2, 2], strides=[2, 2])],
[])
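# SAME_UPPER pooling gives out = ceil(in / stride) = ceil(4 / 2) = 2 per spatial axis.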
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 2, 2))])
def test_maxpool_with_same_upper_padding_and_stride_and_dilation(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_UPPER", kernel_shape=[2, 2], strides=[2, 2], dilations=[2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 2, 2))])
def test_maxpool_with_same_upper_padding_and_stride_one(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_UPPER", kernel_shape=[2, 2], strides=[1, 1])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 4, 4))])
def test_maxpool_with_same_lower_padding_and_stride(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 9, 9))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_LOWER", kernel_shape=[2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 5, 5))])
def test_maxpool_with_same_lower_padding_and_stride_and_dilation(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 9, 9))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_LOWER", kernel_shape=[2, 2], strides=[2, 2], dilations=[2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 5, 5))])
def test_maxpool_with_same_lower_padding_and_big_stride(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_LOWER", kernel_shape=[2, 2], strides=[4, 4])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_averagepool(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_averagepool_3D(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3, 3))])
def test_averagepool_with_padding(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 6, 6))])
def test_averagepool_with_padding_and_stride(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_averagepool_ceil(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (1, 1, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[3, 3], strides=[2, 2], ceil_mode=True)],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (1, 1, 2, 2))])
def test_lppool(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_lppool_3D(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3, 3))])
def test_lppool_with_padding(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 6, 6))])
def test_lppool_with_padding_and_stride(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_roipool(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4)),
("rois", TensorProto.INT64, (2, 5))],
[make_node("MaxRoiPool", ["X", "rois"], ["Y"], pooled_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3, 2, 2))])
def test_lp_norm(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7))],
[make_node('LpNormalization', ['x'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7))])
def test_instance_norm(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('scale', TensorProto.FLOAT, (4,)),
('b', TensorProto.FLOAT, (4,))],
[make_node('InstanceNormalization', ['x', 'scale', 'b'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7))])
def test_global_maxpool(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("GlobalMaxPool", ["X"], ["Y"])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_global_averagepool(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("GlobalAveragePool", ["X"], ["Y"])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_global_lppool(self): # type: () -> None
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("GlobalLpPool", ["X"], ["Y"])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_conv_transpose(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2])],
[])
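# ConvTranspose output per spatial axis: stride * (in - 1) + ((k - 1) * dilation + 1) - pads,
# i.e. 2 * 15 + 3 - 0 = 33.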
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 33, 33))])
def test_conv_transpose_with_pads(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 30, 30))])
def test_conv_transpose_with_output_shape(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], output_shape=[36, 36])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 36, 36))])
def test_conv_transpose_with_kernel_shape(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, None, None))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', kernel_shape=[3, 3], strides=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 30, 30))])
def test_conv_transpose_with_dilations(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], dilations=[3, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 34, 34))])
def test_conv_transpose_with_group(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], group=2)],
[])
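# For ConvTranspose the output channel count is W.shape[1] * group = 32 * 2 = 64;
# the spatial dims are 2 * 15 + 3 - (1 + 2) = 30, as in the plain pads test above.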
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 64, 30, 30))])
def test_conv_transpose_with_group_and_output_shape(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], group=2, output_shape=[36, 36])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 64, 36, 36))])
def test_mvn_function_output_shape(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16))],
[make_node('MeanVarianceNormalization', 'X', 'Y', axes=[0, 2, 3])],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 48, 16, 16))])
def test_scan(self): # type: () -> None
batch_size = 1
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
# can't use self._make_graph for the subgraph, as it adds extra inputs for the Reshape
# operations it inserts. That breaks subgraph inference, which expects the number of inputs
# passed from Scan to match the GraphProto, while Scan knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (batch_size, loop_state_size)),
('scan_input', TensorProto.FLOAT, (batch_size, seq_len, input_size))],
[make_node('Scan', ['', 'loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (batch_size, loop_state_size)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (batch_size, seq_len, input_size))],
opset_imports=[helper.make_opsetid("", 8)])
def test_scan_opset9(self): # type: () -> None
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
# can't use self._make_graph for the subgraph, as it adds extra inputs for the Reshape
# operations it inserts. That breaks subgraph inference, which expects the number of inputs
# passed from Scan to match the GraphProto, while Scan knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (seq_len, input_size))],
opset_imports=[helper.make_opsetid("", 9)])
def test_scan_opset9_axes(self): # type: () -> None
axis_0_len = 'axis0'
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
# can't use self._make_graph for the subgraph, as it adds extra inputs for the Reshape
# operations it inserts. That breaks subgraph inference, which expects the number of inputs
# passed from Scan to match the GraphProto, while Scan knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph, scan_input_axes=[1])],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (seq_len, axis_0_len, input_size))],
opset_imports=[helper.make_opsetid("", 9)])
def test_scan_opset9_output_axes(self): # type: () -> None
axis_0_len = 'axis0'
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph, scan_input_axes=[1], scan_output_axes=[1])],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
opset_imports=[helper.make_opsetid("", 9)])
def test_scan_opset9_negative_axes(self): # type: () -> None
axis_0_len = 'axis0'
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph, scan_input_axes=[-2], scan_output_axes=[-2])],
[]
)
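        # Negative axes count from the back: -2 on the rank-3 scan input resolves to axis 1,
        # and -2 on the rank-3 scan output also resolves to axis 1, matching the
        # positive-axis test above.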
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
opset_imports=[helper.make_opsetid("", 9)])
def test_if_ver1(self): # type: () -> None
# Create a simple If node where the 'then' subgraph adds to the current value, and the 'else' subgraph
# subtracts.
        # can't use self._make_graph for the subgraphs as that adds more inputs for the Reshape operations it inserts.
# this breaks the subgraph inferencing as it expects the subgraphs to have zero inputs
then_subgraph = helper.make_graph(
[make_node('Add', ['current_value', 'add_value'], ['then_output'])],
"then_subgraph",
[], # no inputs
[make_tensor_value_info('then_output', TensorProto.UNDEFINED, None)],
)
else_subgraph = helper.make_graph(
[make_node('Sub', ['current_value', 'sub_value'], ['else_output'])],
"else_subgraph",
[], # no inputs
[make_tensor_value_info('else_output', TensorProto.UNDEFINED, None)],
)
graph = self._make_graph(
[('cond', TensorProto.BOOL, (1,)),
('current_value', TensorProto.FLOAT, (1,)),
('add_value', TensorProto.FLOAT, (1,)),
('sub_value', TensorProto.FLOAT, (1,))],
[make_node('If', ['cond'], ['if_output'],
then_branch=then_subgraph, else_branch=else_subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('if_output', TensorProto.FLOAT, (1,))],
opset_imports=[make_opsetid("", 10)])
def test_if(self): # type: () -> None
# Create a simple If node where the 'then' subgraph adds to the current value, and the 'else' subgraph
# subtracts.
        # can't use self._make_graph for the subgraphs as that adds more inputs for the Reshape operations it inserts.
# this breaks the subgraph inferencing as it expects the subgraphs to have zero inputs
then_subgraph = helper.make_graph(
[make_node('Add', ['current_value', 'add_value'], ['then_output'])],
"then_subgraph",
[], # no inputs
[make_tensor_value_info('then_output', TensorProto.UNDEFINED, None)],
)
else_subgraph = helper.make_graph(
[make_node('Sub', ['current_value', 'sub_value'], ['else_output'])],
"else_subgraph",
[], # no inputs
[make_tensor_value_info('else_output', TensorProto.UNDEFINED, None)],
)
graph = self._make_graph(
[('cond', TensorProto.BOOL, (1,)),
('current_value', TensorProto.FLOAT, (1,)),
('add_value', TensorProto.FLOAT, (1,)),
('sub_value', TensorProto.FLOAT, (1,))],
[make_node('If', ['cond'], ['if_output'],
then_branch=then_subgraph, else_branch=else_subgraph)],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('if_output', TensorProto.FLOAT, (1,))])
def test_if_with_different_shapes_in_then_else_branches(self): # type: () -> None
# Create a simple If node where the 'then' subgraph adds to the current value, and the 'else' subgraph
# subtracts.
        # can't use self._make_graph for the subgraphs as that adds more inputs for the Reshape operations it inserts.
# this breaks the subgraph inferencing as it expects the subgraphs to have zero inputs
then_subgraph = helper.make_graph(
[make_node('Add', ['current_value', 'add_value'], ['then_output'])],
"then_subgraph",
[], # no inputs
[make_tensor_value_info('then_output', TensorProto.UNDEFINED, (1,))],
)
else_subgraph = helper.make_graph(
[make_node('Sub', ['current_value', 'sub_value'], ['else_output'])],
"else_subgraph",
[], # no inputs
[make_tensor_value_info('else_output', TensorProto.UNDEFINED, (5,))],
)
graph = self._make_graph(
[('cond', TensorProto.BOOL, (1,)),
('current_value', TensorProto.FLOAT, (1,)),
('add_value', TensorProto.FLOAT, (1,)),
('sub_value', TensorProto.FLOAT, (5,))],
[make_node('If', ['cond'], ['if_output'],
then_branch=then_subgraph, else_branch=else_subgraph)],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('if_output', TensorProto.FLOAT, (None,))]) # type: ignore
def test_maxunpool_shape_without_output_shape(self): # type: () -> None
graph = self._make_graph(
[('xT', TensorProto.FLOAT, (1, 1, 2, 2)),
('xI', TensorProto.FLOAT, (1, 1, 2, 2))],
[make_node('MaxUnpool', ['xT', 'xI'], 'Y', kernel_shape=[2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (1, 1, 4, 4))])
def test_maxunpool_shape_with_output_shape(self): # type: () -> None
graph = self._make_graph(
[('xT', TensorProto.FLOAT, (1, 1, 2, 2)),
('xI', TensorProto.FLOAT, (1, 1, 2, 2)),
('output_shape', TensorProto.FLOAT, (4, ))],
[make_node('MaxUnpool', ['xT', 'xI', 'output_shape'], 'Y', kernel_shape=[2, 2], strides=[2, 2])],
[make_tensor_value_info("Y", TensorProto.FLOAT, None)])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, None)])
def test_onehot_without_axis(self): # type: () -> None
graph = self._make_graph(
[('indices', TensorProto.INT64, (2, 2)),
('depth', TensorProto.INT64, ()),
('values', TensorProto.FLOAT, (2, ))],
[make_node('OneHot', ['indices', 'depth', 'values'], 'Y')],
[])
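        # 'depth' is only known at runtime, so the new one-hot dimension is unknown (None)
        # and is appended as the last axis by default.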
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (2, 2, None))]) # type: ignore
def test_onehot_with_axis(self): # type: () -> None
graph = self._make_graph(
[('indices', TensorProto.INT64, (2, 3, 5)),
('depth', TensorProto.INT64, (1, )),
('values', TensorProto.FLOAT, (2, ))],
[make_node('OneHot', ['indices', 'depth', 'values'], 'Y', axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (2, None, 3, 5))]) # type: ignore
def test_loop(self): # type: () -> None
        # can't use self._make_graph for the subgraph as it adds more inputs for the Reshape operations it inserts.
# this breaks the subgraph inferencing as it expects the number of inputs passed from Loop to match
# the GraphProto, but Loop knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('iter_num_in', TensorProto.INT64, (1,)),
make_tensor_value_info('cond_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, ())]
output_value_infos = [make_tensor_value_info('cond_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.FLOAT, (3,))]
subgraph = helper.make_graph(
[make_node('Identity', ['cond_in'], ['cond_out']),
make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['outer_scope_input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('max_trip_count', TensorProto.INT64, (1,)),
('cond_orig', TensorProto.FLOAT, (1,)),
('loop_state_orig', TensorProto.FLOAT, (2,)),
('outer_scope_input', TensorProto.FLOAT, (3,))],
[make_node('Loop', ['max_trip_count', 'cond_orig', 'loop_state_orig'], ['loop_state_final', 'loop_output'],
body=subgraph)],
[]
)
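        # Loop prepends an unknown trip-count dimension to each scan output, so
        # 'loop_output' becomes (None, 3); the carried state keeps its type but not
        # necessarily its shape.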
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, None), # shape may change between iterations
make_tensor_value_info('loop_output', TensorProto.FLOAT, (None, 3))]) # type: ignore
def test_loop_no_state(self): # type: () -> None
input_value_infos = [make_tensor_value_info('iter_num_in', TensorProto.INT64, (1,)),
make_tensor_value_info('cond_in', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('cond_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.FLOAT, (3,))]
subgraph = helper.make_graph(
[make_node('Identity', ['cond_in'], ['cond_out']),
make_node('Identity', ['outer_scope_input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('max_trip_count', TensorProto.INT64, (1,)),
('cond_orig', TensorProto.FLOAT, (1,)),
('outer_scope_input', TensorProto.FLOAT, (3,))],
[make_node('Loop', ['max_trip_count', 'cond_orig'], ['loop_output'],
body=subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_output', TensorProto.FLOAT, (None, 3))]) # type: ignore
def test_constantofshape_with_input_shape(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (3,), (3, 4, 5))),
make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.INT32, (1, ), (2, )))],
[])
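        # The Constant node is folded, so ConstantOfShape sees the literal shape (3, 4, 5);
        # the 'value' attribute only determines the output dtype (INT32 here).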
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, (3,)),
make_tensor_value_info('y', TensorProto.INT32, (3, 4, 5))]) # type: ignore
def test_constantofshape_without_input_shape(self): # type: () -> None
graph = self._make_graph([('shape', TensorProto.INT64, (3, ))],
[make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.UINT8, (1, ), (2, )))],
[])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.UINT8, (None, None, None))]) # type: ignore
def test_constantofshape_with_shape_zero(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (3,), (0,))),
make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.INT32, (1, ), (2, )))],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, (3,)),
make_tensor_value_info('y', TensorProto.INT32, (0,))]) # type: ignore
def test_convinteger(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (3, 4, 5, 6, 7)),
('y', TensorProto.UINT8, (5, 4, 2, 4, 3))],
[make_node('ConvInteger', ['x', 'y'], 'z', pads=[0, 1, 1, 0, 0, 1], dilations=[1, 2, 2], strides=[1, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (3, 5, 4, 1, 3))])
    def test_convinteger_dilations(self):  # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 8, 8, 8)),
('y', TensorProto.INT8, (50, 4, 3, 3, 3)),
('x_zero_point', TensorProto.UINT8, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('ConvInteger', ['x', 'y', 'x_zero_point', 'y_zero_point'], 'z', dilations=[1, 2, 3])],
[])
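        # Effective kernel extent is dilation * (k - 1) + 1 = 3, 5, 7 per spatial axis,
        # so the 8x8x8 input yields spatial dims 8 - (extent - 1) = 6, 4, 2.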
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, 6, 4, 2))])
def test_convinteger_strides(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('y', TensorProto.INT8, (50, 4, 3, 3, 3)),
('x_zero_point', TensorProto.UINT8, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('ConvInteger', ['x', 'y', 'x_zero_point', 'y_zero_point'], 'z', strides=[1, 2, 3])],
[])
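        # With kernel 3 and no padding, each output dim is floor((8 - 3) / stride) + 1,
        # i.e. 6, 3, 2 for strides 1, 2, 3.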
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, 6, 3, 2))])
    def test_convinteger_pads(self):  # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('y', TensorProto.INT8, (50, 4, 3, 3, 3))],
[make_node('ConvInteger', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
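        # pads are [x1_begin, x2_begin, x3_begin, x1_end, x2_end, x3_end]; each spatial dim
        # becomes in + begin + end - 3 + 1: 7+1+0, 6+1+1 and 4+2+2 all reduce to 6.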
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, 6, 6, 6))])
    def test_convinteger_group(self):  # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('y', TensorProto.INT8, (4, 1, 8, 8, 8))],
[make_node('ConvInteger', ['x', 'y'], 'z', group=4)],
[])
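        # group=4 with per-group weight shape (4, 1, 8, 8, 8): the kernel spans the whole
        # spatial extent, so each spatial dim collapses to 1 and the channel count stays 4.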
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 4, 1, 1, 1))])
    def test_convinteger_partial_missing_shape(self):  # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, None, 6, 4)),
('y', TensorProto.UINT8, (50, 4, 3, 3, 3)),
('x_zero_point', TensorProto.UINT8, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('ConvInteger', ['x', 'y', 'x_zero_point', 'y_zero_point'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, None, 6, 6))]) # type: ignore
    def test_convinteger_partial_missing_weight_shape(self):  # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('y', TensorProto.UINT8, (50, 4, None, 3, 3))],
[make_node('ConvInteger', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, None)])
def test_qlinearconv(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (3, 4, 5, 6, 7)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (5, 4, 2, 4, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[0, 1, 1, 0, 0, 1], dilations=[1, 2, 2], strides=[1, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (3, 5, 4, 1, 3))])
def test_qlinearconv_dilations(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 8, 8, 8)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', dilations=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 50, 6, 4, 2))])
def test_qlinearconv_strides(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.INT8, ()),
('w', TensorProto.INT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.INT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.INT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', strides=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT8, (30, 50, 6, 3, 2))])
def test_qlinearconv_pads(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.INT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.INT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 50, 6, 6, 6))])
def test_qlinearconv_group(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.INT8, ()),
('w', TensorProto.INT8, (4, 1, 8, 8, 8)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.INT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.INT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', group=4)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT8, (30, 4, 1, 1, 1))])
def test_qlinearconv_partial_missing_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, None, 6, 4)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 50, None, 6, 6))]) # type: ignore
def test_qlinearconv_partial_missing_weight_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (50, 4, None, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, None)])
def _make_qlinearmatmul_test(self, shape1, shape2): # type: (Sequence[int], Sequence[int]) -> None
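        # Use numpy's own matmul on dummy data so numpy's broadcasting rules act as the
        # oracle for the expected output shape.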
        expected_out_shape = np.matmul(np.arange(np.prod(shape1)).reshape(shape1),
                                       np.arange(np.prod(shape2)).reshape(shape2)).shape
graph = self._make_graph(
[('a', TensorProto.UINT8, shape1),
('a_scale', TensorProto.FLOAT, ()),
('a_zero_point', TensorProto.UINT8, ()),
('b', TensorProto.UINT8, shape2),
('b_scale', TensorProto.FLOAT, ()),
('b_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearMatMul', ['a', 'a_scale', 'a_zero_point', 'b', 'b_scale', 'b_zero_point', 'y_scale', 'y_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, expected_out_shape)])
def test_qlinearmatmul(self): # type: () -> None
self._make_qlinearmatmul_test((3,), (3,))
self._make_qlinearmatmul_test((4, 2), (2, 4))
self._make_qlinearmatmul_test((2,), (2, 3))
self._make_qlinearmatmul_test((4, 2), (2,))
self._make_qlinearmatmul_test((5, 1, 4, 2), (1, 3, 2, 3))
self._make_qlinearmatmul_test((4, 2), (3, 2, 3))
def _make_qlinearmatmul_test_allow_unknown(self, shape1, shape2, expected_out_shape): # type: (Any, Any, Any) -> None
graph = self._make_graph(
[('a', TensorProto.UINT8, shape1),
('a_scale', TensorProto.FLOAT, ()),
('a_zero_point', TensorProto.UINT8, ()),
('b', TensorProto.UINT8, shape2),
('b_scale', TensorProto.FLOAT, ()),
('b_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearMatMul', ['a', 'a_scale', 'a_zero_point', 'b', 'b_scale', 'b_zero_point', 'y_scale', 'y_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, expected_out_shape)])
def test_qlinearmatmul_allow_unknown(self): # type: () -> None
self._make_qlinearmatmul_test_allow_unknown((None,), (None,), ())
self._make_qlinearmatmul_test_allow_unknown((3,), (None,), ())
self._make_qlinearmatmul_test_allow_unknown((2,), (2, "a"), ("a",))
self._make_qlinearmatmul_test_allow_unknown((4, 2), (2, "a"), (4, "a"))
self._make_qlinearmatmul_test_allow_unknown((4, None), (2, "a"), (4, "a"))
self._make_qlinearmatmul_test_allow_unknown((4, None), (None, "a"), (4, "a"))
self._make_qlinearmatmul_test_allow_unknown((1, 4, 2), ("a", 2, 5), ("a", 4, 5))
self._make_qlinearmatmul_test_allow_unknown((1, 3, 4, 2), ("a", 2, 5), (1, 3, 4, 5))
self._make_qlinearmatmul_test_allow_unknown(None, ("a", 2, 5), None)
self._make_qlinearmatmul_test_allow_unknown(None, None, None)
def _make_matmulinteger_test(self, shape1, shape2): # type: (Sequence[int], Sequence[int]) -> None
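        # As in the QLinearMatMul helper above, numpy's matmul supplies the expected shape.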
        expected_out_shape = np.matmul(np.arange(np.prod(shape1)).reshape(shape1),
                                       np.arange(np.prod(shape2)).reshape(shape2)).shape
graph = self._make_graph(
[('A', TensorProto.UINT8, shape1),
('B', TensorProto.UINT8, shape2),
('a_zero_point', TensorProto.UINT8, ()),
('b_zero_point', TensorProto.UINT8, ())],
[make_node('MatMulInteger', ['A', 'B', 'a_zero_point', 'b_zero_point'], ['Y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.INT32, expected_out_shape)])
def test_matmulinteger(self): # type: () -> None
self._make_matmulinteger_test((2,), (2,))
self._make_matmulinteger_test((1, 2), (2, 3))
self._make_matmulinteger_test((2,), (2, 3))
self._make_matmulinteger_test((4, 2), (2,))
self._make_matmulinteger_test((5, 1, 4, 2), (1, 3, 2, 3))
self._make_matmulinteger_test((4, 2), (3, 2, 3))
def test_quantizelinear(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QuantizeLinear', ['x', 'y_scale', 'y_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 4, 5))])
def test_dequantizelinear(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 5)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ())],
[make_node('DequantizeLinear', ['x', 'x_scale', 'x_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (30, 4, 5))])
def test_reversesequence(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('sequence_lens', TensorProto.INT64, (5,))],
[make_node('ReverseSequence', ['x', 'sequence_lens'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 5, 6))])
def test_unique_without_axis(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (2, 4, 2))],
[make_node('Unique', ['X'], ['Y', 'indices', 'inverse_indices', 'counts'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (None,)), # type: ignore
make_tensor_value_info('indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('inverse_indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('counts', TensorProto.INT64, (None,))]) # type: ignore
def test_unique_with_axis(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (2, 4, 2))],
[make_node('Unique', ['X'], ['Y', 'indices', 'inverse_indices', 'counts'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (2, None, 2)), # type: ignore
make_tensor_value_info('indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('inverse_indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('counts', TensorProto.INT64, (None,))]) # type: ignore
def test_det(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (3, 3))],
[make_node('Det', ['X'], ['Y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, ())])
graph = self._make_graph(
[('X', TensorProto.FLOAT, (4, 5, 6, 7, 7))],
[make_node('Det', ['X'], ['Y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (4, 5, 6))])
def test_tile(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('repeats', TensorProto.INT64, (3,))],
[make_node('Tile', ['x', 'repeats'], ['y'])],
[],
initializer=[make_tensor('repeats', TensorProto.INT64, (3,), (1, 2, 3))])
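        # repeats (1, 2, 3) scales dims (4, 5, 6) elementwise -> (4, 10, 18).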
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 10, 18))])
def test_tile_raw_input_data(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('repeats', TensorProto.INT64, (3,))],
[make_node('Tile', ['x', 'repeats'], ['y'])],
[],
initializer=[make_tensor('repeats', TensorProto.INT64, (3,),
                                     vals=np.array([1, 2, 3], dtype='<i8').tobytes(), raw=True)])  # feed raw little-endian bytes (the ONNX standard ordering) for test purposes
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 10, 18))])
def test_tile_rank_inference(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('repeats', TensorProto.INT64, (3,))],
[make_node('Tile', ['x', 'repeats'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_linearclassifier_1D_input(self): # type: () -> None
if ONNX_ML:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5,))],
[make_node('LinearClassifier', ['x'], ['y', 'z'], domain='ai.onnx.ml', coefficients=[0.0008, -0.0008], intercepts=[2.0, 2.0], classlabels_ints=[1, 2])],
[])
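            # A 1-D input is treated as a single batch entry, so the label output 'y' is (1,)
            # and the scores 'z' are (1, num_classes).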
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1,)),
make_tensor_value_info('z', TensorProto.FLOAT, (1, 2))],
opset_imports=[make_opsetid('ai.onnx.ml', 1), make_opsetid('', 11)])
def test_linearclassifier_2D_input(self): # type: () -> None
if ONNX_ML:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('LinearClassifier', ['x'], ['y', 'z'], domain='ai.onnx.ml', coefficients=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], intercepts=[2.0, 2.0, 3.0], classlabels_ints=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (4,)),
make_tensor_value_info('z', TensorProto.FLOAT, (4, 3))],
opset_imports=[make_opsetid('ai.onnx.ml', 1), make_opsetid('', 11)])
def test_roialign_symbolic(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', 'H', 'W')),
('rois', TensorProto.FLOAT, ('num_rois', 4)),
('batch_indices', TensorProto.INT64, ('num_rois',))],
[make_node('RoiAlign', ['x', 'rois', 'batch_indices'], ['y'], output_height=10, output_width=5)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ('num_rois', 'C', 10, 5))]) # type: ignore
def test_roialign_symbolic_defaults(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', 'H', 'W')),
('rois', TensorProto.FLOAT, ('num_rois', 4)),
('batch_indices', TensorProto.INT64, ('num_rois',))],
[make_node('RoiAlign', ['x', 'rois', 'batch_indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ('num_rois', 'C', 1, 1))]) # type: ignore
def test_roialign_num_rois(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', 'H', 'W')),
('rois', TensorProto.FLOAT, ('num_rois', 4)),
('batch_indices', TensorProto.INT64, (15,))],
[make_node('RoiAlign', ['x', 'rois', 'batch_indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (15, 'C', 1, 1))]) # type: ignore
def test_label_encoder_string_int64(self): # type: () -> None
if ONNX_ML:
string_list = ['A', 'm', 'y']
float_list = [94.17, 36.00]
int64_list = [12, 28, 86]
graph = self._make_graph(
[('x', TensorProto.STRING, (6, 1))],
[make_node('LabelEncoder', ['x'], ['y'], domain='ai.onnx.ml',
keys_strings=string_list, values_int64s=int64_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (6, 1))],
opset_imports=[make_opsetid('ai.onnx.ml', 2), make_opsetid('', 11)])
graph = self._make_graph(
[('x', TensorProto.INT64, (2, 3))],
[make_node('LabelEncoder', ['x'], ['y'], domain='ai.onnx.ml',
keys_int64s=int64_list, values_strings=string_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, (2, 3))],
opset_imports=[make_opsetid('ai.onnx.ml', 2), make_opsetid('', 11)])
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2,))],
[make_node('LabelEncoder', ['x'], ['y'], domain='ai.onnx.ml',
keys_floats=float_list, values_int64s=int64_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (2,))],
opset_imports=[make_opsetid('ai.onnx.ml', 2), make_opsetid('', 11)])
graph = self._make_graph(
[('x', TensorProto.INT64, (8,))],
[make_node('LabelEncoder', ['x'], ['y'], domain='ai.onnx.ml',
keys_int64s=int64_list, values_floats=float_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (8,))],
opset_imports=[make_opsetid('ai.onnx.ml', 2), make_opsetid('', 11)])
graph = self._make_graph(
[('x', TensorProto.FLOAT, ())],
[make_node('LabelEncoder', ['x'], ['y'], domain='ai.onnx.ml',
keys_floats=float_list, values_strings=string_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, ())],
opset_imports=[make_opsetid('ai.onnx.ml', 2), make_opsetid('', 11)])
graph = self._make_graph(
[('x', TensorProto.STRING, (1, 2))],
[make_node('LabelEncoder', ['x'], ['y'], domain='ai.onnx.ml',
keys_strings=string_list, values_floats=float_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 2))],
opset_imports=[make_opsetid('ai.onnx.ml', 2), make_opsetid('', 11)])
def make_sparse(self,
shape, # type: Sequence[int]
values, # type: Sequence[int]
indices_shape, # type: Sequence[int]
indices # type: Sequence[int]
): # type: (...) -> SparseTensorProto
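        # Build a SparseTensorProto: 'dims' is the dense shape, 'values' holds the nnz
        # entries, and 'indices' their positions (linearized here, since indices_shape is 1-D).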
sparse = SparseTensorProto()
sparse.dims.extend(shape)
nnz = len(values)
sparse.values.CopyFrom(helper.make_tensor('spval', TensorProto.INT64, (nnz,), values))
sparse.indices.CopyFrom(helper.make_tensor('spind', TensorProto.INT64, indices_shape, indices))
return sparse
def test_constant_sparse(self): # type: () -> None
y_shape = [100]
y_value = self.make_sparse(y_shape, [13, 17, 19], [3], [9, 27, 81])
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], sparse_value=y_value)],
[])
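        # The dense output shape comes from SparseTensorProto.dims ([100]) and the dtype
        # from its values tensor (INT64).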
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, y_shape)]) # type: ignore
def test_constant_value_int(self): # type: () -> None
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_int=42)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, [])])
def test_constant_value_ints(self): # type: () -> None
value_ints = [1, 2, 3]
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_ints=value_ints)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, [len(value_ints)])])
def test_constant_value_float(self): # type: () -> None
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_float=1.42)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, [])])
def test_constant_value_floats(self): # type: () -> None
value_floats = [1.0, 1.1, 1.2]
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_floats=value_floats)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, [len(value_floats)])])
def test_constant_value_string(self): # type: () -> None
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_string="String value")],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, [])])
def test_constant_value_strings(self): # type: () -> None
value_strings = ["o", "n", "n", "x"]
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_strings=value_strings)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, [len(value_strings)])])
def test_range(self): # type: () -> None
graph = self._make_graph(
[('start', TensorProto.FLOAT, ()),
('limit', TensorProto.FLOAT, ()),
('delta', TensorProto.FLOAT, ())],
[make_node('Range', ['start', 'limit', 'delta'], ['output'])],
[],
initializer=[make_tensor('start', TensorProto.FLOAT, (), (1,)),
make_tensor('limit', TensorProto.FLOAT, (), (5,)),
make_tensor('delta', TensorProto.FLOAT, (), (2,))])
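        # Number of elements is max(ceil((limit - start) / delta), 0) = ceil((5 - 1) / 2) = 2.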
self._assert_inferred(graph, [make_tensor_value_info('output', TensorProto.FLOAT, (2,))])
def test_range_rank_inference(self): # type: () -> None
graph = self._make_graph(
[('start', TensorProto.INT32, ()),
('limit', TensorProto.INT32, ()),
('delta', TensorProto.INT32, ())],
[make_node('Range', ['start', 'limit', 'delta'], ['output'])],
[],
initializer=[make_tensor('start', TensorProto.INT32, (), (1,)),
make_tensor('limit', TensorProto.INT32, (), (5,))]) # Missing 'delta' initializer
self._assert_inferred(graph, [make_tensor_value_info('output', TensorProto.INT32, (None,))]) # type: ignore
def test_gathernd(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('indices', TensorProto.INT64, (2,))],
[make_node('GatherND', ['x', 'indices'], ['y'])],
[])
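        # GatherND output shape is indices.shape[:-1] + data.shape[indices.shape[-1]:],
        # here () + (6,) = (6,).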
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (6,))])
def test_gathernd_batchdim_1(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('indices', TensorProto.INT64, (2, 1))],
[make_node('GatherND', ['x', 'indices'], ['y'], batch_dims=1)],
[])
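        # With batch_dims=1 the leading batch dim (2,) is kept; the single index consumes
        # one data dim, leaving (2,) + (2,) = (2, 2).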
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))])
def test_cumsum(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3)),
('axis', TensorProto.FLOAT, (1,))],
[make_node('CumSum', ['x', 'axis'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 3))])
def test_nonmaxsuppression(self): # type: () -> None
graph = self._make_graph(
[('boxes', TensorProto.FLOAT, (1, 3, 4)),
('scores', TensorProto.FLOAT, (1, 5, 3))],
[make_node('NonMaxSuppression', ['boxes', 'scores'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (None, 3))]) # type: ignore
def test_sequence_empty(self): # type: () -> None
graph = self._make_graph(
[],
[make_node('SequenceEmpty', [], ['output'])],
[])
self._assert_inferred(graph, [make_sequence_value_info('output', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_construct(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_construct_one_input(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4))],
[make_node('SequenceConstruct', ['input1'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_construct_diff_rank(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3)),
('input3', TensorProto.FLOAT, (2, 3))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_construct_diff_dim_size(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 5)),
('input3', TensorProto.FLOAT, (2, 3, 6))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, None))]) # type: ignore
def test_sequence_insert(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('input4', TensorProto.FLOAT, (2, 3, 4))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceInsert', ['in_sequence', 'input4'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_insert_diff_rank(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('input4', TensorProto.FLOAT, (2, 3))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceInsert', ['in_sequence', 'input4'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_sequence_value_info('output_sequence', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_insert_diff_shape(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 5, 4)),
('input4', TensorProto.FLOAT, (2, 5, 2))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceInsert', ['in_sequence', 'input4'], ['output_sequence'])],
[])
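        # Element shapes (2, 3, 4) and (2, 5, 4) merge to (2, None, 4); inserting (2, 5, 2)
        # further relaxes the last dim, giving (2, None, None) for the output sequence.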
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 4)), # type: ignore
make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, None, None))]) # type: ignore
def test_sequence_at(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceAt', ['in_sequence', 'ind'], ['output'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_tensor_value_info('output', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_at_unknown_shape(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceAt', ['in_sequence', 'ind'], ['output'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, None),
make_tensor_value_info('output', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_at_unknown_dim_size(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 5)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceAt', ['in_sequence', 'ind'], ['output'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, None)), # type: ignore
make_tensor_value_info('output', TensorProto.FLOAT, (2, 3, None))]) # type: ignore
def test_sequence_erase(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceErase', ['in_sequence', 'ind'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_erase_diff_dim_size(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 5, 'x')),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceErase', ['in_sequence', 'ind'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, None, 'x'))]) # type: ignore
def test_sequence_length(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceLength', ['in_sequence'], ['len'])],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('len', TensorProto.INT64, ())]) # type: ignore
def test_split_to_sequence(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, (2,))],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
            initializer=[make_tensor('split', TensorProto.INT32, (2,), (3, 3))])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (3, 4))]) # type: ignore
def test_split_to_sequence_scalar(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, ())],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
initializer=[make_tensor('split', TensorProto.INT32, (), (2, ))])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 4))]) # type: ignore
def test_split_to_sequence_keepdims(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], keepdims=1)],
[])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (1, 4))]) # type: ignore
def test_split_to_sequence_not_keepdims(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], keepdims=0)],
[])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (4, ))]) # type: ignore
def test_split_to_sequence_ignore_keepdims(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, (2,))],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'], keepdims=0)],
[],
            initializer=[make_tensor('split', TensorProto.INT32, (2,), (3, 3))])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (3, 4))]) # type: ignore
def test_split_to_sequence_axis(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], axis=1)],
[])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (6, 1))]) # type: ignore
def test_split_to_sequence_neg_axis(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], axis=-2)],
[])
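        # axis=-2 on a rank-2 input resolves to axis 0; with no 'split' input the default
        # chunk size is 1, so each sequence element is (1, 4).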
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (1, 4))]) # type: ignore
def test_split_to_sequence_split_sizes(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, (3,))],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
            initializer=[make_tensor('split', TensorProto.INT32, (3,), (2, 1, 3))])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (None, 4))]) # type: ignore
def test_split_to_sequence_non_divisible(self): # type: () -> None
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, ())],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
initializer=[make_tensor('split', TensorProto.INT32, (), (4, ))])
self._assert_inferred(graph,
[make_sequence_value_info('output_sequence', TensorProto.FLOAT, (None, 4))]) # type: ignore
def test_concat_from_sequence(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=0)],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('out', TensorProto.FLOAT, (None, 3, 'x'))]) # type: ignore
def test_concat_from_sequence_unknown_shape(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3)),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=0)],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, None),
make_tensor_value_info('out', TensorProto.FLOAT, None)]) # type: ignore
def test_concat_from_sequence_unknown_dim_size(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 4, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=0)],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_tensor_value_info('out', TensorProto.FLOAT, (None, None, 'x'))]) # type: ignore
def test_concat_from_sequence_axis(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 4, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=2)],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_tensor_value_info('out', TensorProto.FLOAT, (2, None, None))]) # type: ignore
def test_concat_from_sequence_neg_axis(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 4, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=-3)],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_tensor_value_info('out', TensorProto.FLOAT, (None, None, 'x'))]) # type: ignore
def test_concat_from_sequence_new_axis(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=2, new_axis=1)],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('out', TensorProto.FLOAT, (2, 3, None, 'x'))]) # type: ignore
def test_concat_from_sequence_neg_new_axis(self): # type: () -> None
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=-1, new_axis=1)],
[])
self._assert_inferred(
graph,
[make_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('out', TensorProto.FLOAT, (2, 3, 'x', None))]) # type: ignore
def test_adagrad(self): # type: () -> None
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X', TensorProto.FLOAT, (1, 2)),
('G', TensorProto.FLOAT, (1, 2)),
('H', TensorProto.FLOAT, (1, 2))],
[make_node('Adagrad', ['R', 'T', 'X', 'G', 'H'], ['X_new', 'H_new'],
domain='ai.onnx.training')],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('X_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H_new', TensorProto.FLOAT, (1, 2))],
opset_imports=[helper.make_opsetid('', 12), helper.make_opsetid('ai.onnx.training', 1)])
def test_adagrad_multiple(self): # type: () -> None
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X1', TensorProto.FLOAT, (1, 2)),
('X2', TensorProto.FLOAT, (3, 4)),
('G1', TensorProto.FLOAT, (1, 2)),
('G2', TensorProto.FLOAT, (3, 4)),
('H1', TensorProto.FLOAT, (1, 2)),
('H2', TensorProto.FLOAT, (3, 4))],
[make_node('Adagrad', ['R', 'T', 'X1', 'X2', 'G1', 'G2', 'H1', 'H2'],
['X1_new', 'X2_new', 'H1_new', 'H2_new'],
domain='ai.onnx.training')],
[])
self._assert_inferred(graph,
[make_tensor_value_info('X1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('X2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('H1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H2_new', TensorProto.FLOAT, (3, 4))],
opset_imports=[helper.make_opsetid('', 12), helper.make_opsetid('ai.onnx.training', 1)])
def test_momentum(self): # type: () -> None
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X', TensorProto.FLOAT, (1, 2)),
('G', TensorProto.FLOAT, (1, 2)),
('V', TensorProto.FLOAT, (1, 2))],
[make_node('Momentum', ['R', 'T', 'X', 'G', 'V'], ['X_new', 'V_new'],
alpha=0.9, beta=1.0, norm_coefficient=0.02, mode='standard',
domain='ai.onnx.training')],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('X_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V_new', TensorProto.FLOAT, (1, 2))],
opset_imports=[helper.make_opsetid('', 12), helper.make_opsetid('ai.onnx.training', 1)])
def test_momentum_multiple(self): # type: () -> None
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X1', TensorProto.FLOAT, (1, 2)),
('X2', TensorProto.FLOAT, (3, 4)),
('G1', TensorProto.FLOAT, (1, 2)),
('G2', TensorProto.FLOAT, (3, 4)),
('V1', TensorProto.FLOAT, (1, 2)),
('V2', TensorProto.FLOAT, (3, 4))],
[make_node('Momentum', ['R', 'T', 'X1', 'X2', 'G1', 'G2', 'V1', 'V2'],
['X1_new', 'X2_new', 'V1_new', 'V2_new'],
alpha=0.9, beta=1.0, norm_coefficient=0.02, mode='nesterov',
domain='ai.onnx.training')],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('X1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('X2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('V1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V2_new', TensorProto.FLOAT, (3, 4))],
opset_imports=[helper.make_opsetid('', 12), helper.make_opsetid('ai.onnx.training', 1)])
def test_adam(self): # type: () -> None
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X', TensorProto.FLOAT, (1, 2)),
('G', TensorProto.FLOAT, (1, 2)),
('V', TensorProto.FLOAT, (1, 2)),
('H', TensorProto.FLOAT, (1, 2))],
[make_node('Adam', ['R', 'T', 'X', 'G', 'V', 'H'], ['X_new', 'V_new', 'H_new'],
domain='ai.onnx.training',
alpha=0.9, beta=1.0, norm_coefficient=0.02)],
[])
infos = [make_tensor_value_info('X_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H_new', TensorProto.FLOAT, (1, 2))]
self._assert_inferred(
graph,
infos,
opset_imports=[make_opsetid('ai.onnx.training', 1), make_opsetid('', 12)])
def test_adam_multiple(self): # type: () -> None
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X1', TensorProto.FLOAT, (1, 2)),
('X2', TensorProto.FLOAT, (3, 4)),
('G1', TensorProto.FLOAT, (1, 2)),
('G2', TensorProto.FLOAT, (3, 4)),
('V1', TensorProto.FLOAT, (1, 2)),
('V2', TensorProto.FLOAT, (3, 4)),
('H1', TensorProto.FLOAT, (1, 2)),
('H2', TensorProto.FLOAT, (3, 4))],
[make_node('Adam', ['R', 'T', 'X1', 'X2', 'G1', 'G2', 'V1', 'V2', 'H1', 'H2'],
['X1_new', 'X2_new', 'V1_new', 'V2_new', 'H1_new', 'H2_new'],
domain='ai.onnx.training',
alpha=0.9, beta=1.0, norm_coefficient=0.02)],
[])
infos = [make_tensor_value_info('X1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('X2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('V1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('H1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H2_new', TensorProto.FLOAT, (3, 4))]
self._assert_inferred(
graph,
infos,
opset_imports=[make_opsetid('ai.onnx.training', 1), make_opsetid('', 12)])
def test_pad_opset10(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, None, 2))],
[make_node('Pad', 'x', 'y', pads=[1, 3, 1, 1, 0, 1])],
[])
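        # pads are [x1_begin, x2_begin, x3_begin, x1_end, x2_end, x3_end]: dim 0 grows to
        # 1 + 1 + 1 = 3, dim 1 stays unknown, dim 2 grows to 1 + 2 + 1 = 4.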
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, None, 4))], opset_imports=[helper.make_opsetid("", 10)]) # type: ignore
def test_constant_pad_2d_opset10(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 4))],
[make_node('Pad', 'x', 'y', pads=[0, 0, 3, 1, 0, 0, 4, 2], mode="constant", value=2.0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3, 11, 7))], opset_imports=[helper.make_opsetid("", 10)])
def test_pad(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, None, 2)),
('pads', TensorProto.INT64, (6,))],
[make_node('Pad', ['x', 'pads'], 'y')],
[],
initializer=[make_tensor('pads', TensorProto.INT64, (6,), (1, 3, 1, 1, 0, 1,))])
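        # Same pads as the opset-10 attribute test above, now read from a constant-folded
        # 'pads' input tensor.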
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, None, 4))]) # type: ignore
def test_gatherelements_basic(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (6,)),
('indices', TensorProto.INT64, (2,))],
[make_node('GatherElements', ['x', 'indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2,))])
def test_gatherelements_indices_missing_shape(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (6,)),
('indices', TensorProto.INT64, None)], # type: ignore
[make_node('GatherElements', ['x', 'indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, None)]) # type: ignore
def test_einsum_transpose(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4))],
[make_node('Einsum', ['x'], ['y'], equation='ij->ji')],
[],)
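        # Einsum inference here propagates only rank, not concrete dims, hence (None, None).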
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, None))]) # type: ignore
def test_einsum_dot(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1,)),
('y', TensorProto.FLOAT, (1,))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='i,i->')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_einsum_scalar(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, ()),
('y', TensorProto.FLOAT, ())],
[make_node('Einsum', ['x', 'y'], ['z'], equation=',->')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_einsum_outer_prod(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 5)),
('y', TensorProto.FLOAT, (7, 9))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='ij,ab->ijab')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None, None))]) # type: ignore
def test_einsum_sum_along_dim(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4))],
[make_node('Einsum', ['x'], ['y'], equation='i j->i ')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, ))]) # type: ignore
def test_einsum_ellipsis(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 4))],
[make_node('Einsum', ['x'], ['y'], equation='... ii ->... i')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, None))]) # type: ignore
def test_einsum_ellipsis_2(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('y', TensorProto.FLOAT, (2, 2, 2))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='...ij,...jk->...ik')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_einsum_ellipsis_3(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('y', TensorProto.FLOAT, (2, 2, 2))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='...ij,...jk')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_einsum_contraction(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7, 8)),
('y', TensorProto.FLOAT, (8, 9, 10))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='abcd,dfg->abcfg')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None, None, None))]) # type: ignore
def test_einsum_contraction_2(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5)),
('y', TensorProto.FLOAT, (3, 5))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='ijk,ik->jk')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None))]) # type: ignore
def test_einsum_batch_matmul(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 2, 3)),
('y', TensorProto.FLOAT, (5, 3, 4))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='bij , b jk-> bik')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_einsum_left_hand_eqn(self): # type: () -> None
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3)),
('y', TensorProto.FLOAT, (3, 4))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='ij,kl')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None, None))]) # type: ignore
def test_einsum_incorrect_num_inputs(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3)),
("y", TensorProto.FLOAT, (2, 3)),
("z", TensorProto.FLOAT, (2, 3))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='i,...j, k, l-> i')],
[])
self.assertRaises(checker.ValidationError, self._inferred, graph)
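    # Note (ours): NegativeLogLikelihoodLoss takes an input of shape
    # (N, C, d1, ..., dk) and an integer target of shape (N, d1, ..., dk);
    # reduction='none' keeps the target shape, while 'mean'/'sum' give a scalar.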
    def test_negative_log_likelihood_shape_is_NC(self): # type: () -> None
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, ))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NC_with_weight(self): # type: () -> None
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,)),
('weight', TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, ))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NC_reduction_mean(self): # type: () -> None
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='mean')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_shape_is_NC_with_weight_reduction_mean(self): # type: () -> None
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,)),
('weight', TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='mean')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2(self): # type: () -> None
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, d1, d2))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2_with_weight(self): # type: () -> None
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2)),
("weight", TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, d1, d2))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2_reduction_sum(self): # type: () -> None
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='sum')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2_with_weight_reduction_mean(self): # type: () -> None
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2)),
("weight", TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='mean')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_input_target_shape_mismatch(self): # type: () -> None
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, d1, d2)),
("target", TensorProto.INT64, (N, d1 + 1, d2)),
("weight", TensorProto.FLOAT, (C,)),
("loss", TensorProto.FLOAT, ())],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='mean')],
[])
self.assertRaises(checker.ValidationError, self._inferred, graph)
    def test_negative_log_likelihood_input_weight_shape_mismatch(self): # type: () -> None
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2)),
("weight", TensorProto.FLOAT, (C + 1,)),
("loss", TensorProto.FLOAT, (N, d1, d2))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='none')],
[])
self.assertRaises(checker.ValidationError, self._inferred, graph)
def test_softmax_cross_entropy_none(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3)),
("y", TensorProto.FLOAT, (2,))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='none')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2,))]) # type: ignore
def test_softmax_cross_entropy_mean(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3)),
("y", TensorProto.FLOAT, (2,))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='mean')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_softmax_cross_entropy_none_NCD1D2(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3, 5, 8)),
("y", TensorProto.FLOAT, (2, 5, 8))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='none')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 5, 8))]) # type: ignore
def test_softmax_cross_entropy_mean_NCD1D2(self): # type: () -> None
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3, 4, 5)),
("y", TensorProto.FLOAT, (2, 4, 5))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='mean')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_celu_function_output_shape(self): # type: () -> None
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16))],
[make_node('Celu', ['X'], ['Y'], alpha=2.0)],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 48, 16, 16))])
if __name__ == '__main__':
unittest.main()
|
import unittest
from onnx import defs, checker, helper
class TestElu(unittest.TestCase):
def test_elu(self): # type: () -> None
self.assertTrue(defs.has('Elu'))
node_def = helper.make_node(
'Elu', ['X'], ['Y'], alpha=1.0)
checker.check_node(node_def)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
from onnx import numpy_helper
import unittest
class TestNumpyHelper(unittest.TestCase):
def _test_numpy_helper_float_type(self, dtype): # type: (np.number) -> None
a = np.random.rand(13, 37).astype(dtype)
tensor_def = numpy_helper.from_array(a, "test")
self.assertEqual(tensor_def.name, "test")
a_recover = numpy_helper.to_array(tensor_def)
np.testing.assert_equal(a, a_recover)
def _test_numpy_helper_int_type(self, dtype): # type: (np.number) -> None
a = np.random.randint(
np.iinfo(dtype).min,
np.iinfo(dtype).max,
dtype=dtype,
size=(13, 37))
tensor_def = numpy_helper.from_array(a, "test")
self.assertEqual(tensor_def.name, "test")
a_recover = numpy_helper.to_array(tensor_def)
np.testing.assert_equal(a, a_recover)
def test_float(self): # type: () -> None
self._test_numpy_helper_float_type(np.float32)
def test_uint8(self): # type: () -> None
self._test_numpy_helper_int_type(np.uint8)
def test_int8(self): # type: () -> None
self._test_numpy_helper_int_type(np.int8)
def test_uint16(self): # type: () -> None
self._test_numpy_helper_int_type(np.uint16)
def test_int16(self): # type: () -> None
self._test_numpy_helper_int_type(np.int16)
def test_int32(self): # type: () -> None
self._test_numpy_helper_int_type(np.int32)
def test_int64(self): # type: () -> None
self._test_numpy_helper_int_type(np.int64)
def test_string(self): # type: () -> None
a = np.array(['Amy', 'Billy', 'Cindy', 'David']).astype(np.object)
tensor_def = numpy_helper.from_array(a, "test")
self.assertEqual(tensor_def.name, "test")
a_recover = numpy_helper.to_array(tensor_def)
np.testing.assert_equal(a, a_recover)
def test_bool(self): # type: () -> None
a = np.random.randint(2, size=(13, 37)).astype(np.bool)
tensor_def = numpy_helper.from_array(a, "test")
self.assertEqual(tensor_def.name, "test")
a_recover = numpy_helper.to_array(tensor_def)
np.testing.assert_equal(a, a_recover)
    def test_float16(self): # type: () -> None
        self._test_numpy_helper_float_type(np.float16)
def test_complex64(self): # type: () -> None
self._test_numpy_helper_float_type(np.complex64)
def test_complex128(self): # type: () -> None
self._test_numpy_helper_float_type(np.complex128)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import AttributeProto, NodeProto, GraphProto, ModelProto, TensorProto, IR_VERSION
import io
import onnx
import os
import tempfile
import unittest
from onnx import helper
class TestBasicFunctions(unittest.TestCase):
def _simple_model(self): # type: () -> ModelProto
# Create a ModelProto.
model = ModelProto()
model.ir_version = IR_VERSION
return model
def _simple_tensor(self): # type: () -> TensorProto
# Create a TensorProto.
tensor = helper.make_tensor(
name='test-tensor',
data_type=TensorProto.FLOAT,
dims=(2, 3, 4),
vals=[x + 0.5 for x in range(24)]
)
return tensor
def test_save_and_load_model(self): # type: () -> None
proto = self._simple_model()
cls = ModelProto
proto_string = onnx._serialize(proto)
# Test if input is string
loaded_proto = onnx.load_model_from_string(proto_string)
self.assertTrue(proto == loaded_proto)
# Test if input has a read function
f = io.BytesIO()
onnx.save_model(proto_string, f)
f = io.BytesIO(f.getvalue())
loaded_proto = onnx.load_model(f, cls)
self.assertTrue(proto == loaded_proto)
# Test if input is a file name
try:
fi = tempfile.NamedTemporaryFile(delete=False)
onnx.save_model(proto, fi)
fi.close()
loaded_proto = onnx.load_model(fi.name, cls)
self.assertTrue(proto == loaded_proto)
finally:
os.remove(fi.name)
def test_save_and_load_tensor(self): # type: () -> None
proto = self._simple_tensor()
cls = TensorProto
proto_string = onnx._serialize(proto)
# Test if input is string
loaded_proto = onnx.load_tensor_from_string(proto_string)
self.assertTrue(proto == loaded_proto)
# Test if input has a read function
f = io.BytesIO()
onnx.save_tensor(loaded_proto, f)
f = io.BytesIO(f.getvalue())
loaded_proto = onnx.load_tensor(f, cls)
self.assertTrue(proto == loaded_proto)
# Test if input is a file name
try:
tfile = tempfile.NamedTemporaryFile(delete=False)
onnx.save_tensor(proto, tfile)
tfile.close()
loaded_proto = onnx.load_tensor(tfile.name, cls)
self.assertTrue(proto == loaded_proto)
finally:
os.remove(tfile.name)
def test_existence(self): # type: () -> None
try:
AttributeProto
NodeProto
GraphProto
ModelProto
except Exception as e:
self.fail(
'Did not find proper onnx protobufs. Error is: {}'
.format(e))
def test_version_exists(self): # type: () -> None
model = ModelProto()
        # A freshly created model should not have an ir_version set.
self.assertFalse(model.HasField('ir_version'))
        # Setting the version annotates the proto with the IR version of
        # the running ONNX release.
model.ir_version = IR_VERSION
model_string = model.SerializeToString()
model.ParseFromString(model_string)
self.assertTrue(model.HasField('ir_version'))
# Check if the version is correct.
self.assertEqual(model.ir_version, IR_VERSION)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from onnx import load, checker, NodeProto
def check_model(): # type: () -> None
parser = argparse.ArgumentParser('check-model')
parser.add_argument('model_pb', type=argparse.FileType('rb'))
args = parser.parse_args()
model = load(args.model_pb)
checker.check_model(model)
def check_node(): # type: () -> None
parser = argparse.ArgumentParser('check-node')
parser.add_argument('node_pb', type=argparse.FileType('rb'))
args = parser.parse_args()
node = NodeProto()
node.ParseFromString(args.node_pb.read())
checker.check_node(node)
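# Illustrative invocation (assuming packaging wires these functions up as the
# console scripts check-model and check-node, which this file itself does not
# show):
#   check-model path/to/model.onnx
#   check-node path/to/node.pb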
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from typing import Text, Sequence, Any, Type, Tuple, NewType, Optional, Dict
import six
import numpy # type: ignore
import onnx.checker
import onnx.onnx_cpp2py_export.checker as c_checker
from onnx import ModelProto, NodeProto, IR_VERSION
class DeviceType(object):
_Type = NewType('_Type', int)
CPU = _Type(0) # type: _Type
CUDA = _Type(1) # type: _Type
class Device(object):
    '''
    Describes a device type and device id.
    Syntax: device_type[:device_id] (the device_id is optional)
    Examples: 'CPU', 'CUDA', 'CUDA:1'
    '''
def __init__(self, device): # type: (Text) -> None
options = device.split(':')
self.type = getattr(DeviceType, options[0])
self.device_id = 0
if len(options) > 1:
self.device_id = int(options[1])
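def _example_device_usage(): # type: () -> None
    # Illustrative sketch (the helper name is ours, not part of the original
    # API): Device parses 'device_type:device_id', with the id defaulting to 0.
    d = Device('CUDA:1')
    assert d.type == DeviceType.CUDA and d.device_id == 1
    assert Device('CPU').device_id == 0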
def namedtupledict(typename, field_names, *args, **kwargs): # type: (Text, Sequence[Text], *Any, **Any) -> Type[Tuple[Any, ...]]
field_names_map = {n: i for i, n in enumerate(field_names)}
    # Some output names are invalid Python identifiers, e.g. "0"
kwargs.setdefault(str('rename'), True)
data = namedtuple(typename, field_names, *args, **kwargs) # type: ignore
def getitem(self, key): # type: (Any, Any) -> Any
if isinstance(key, six.string_types):
key = field_names_map[key]
return super(type(self), self).__getitem__(key) # type: ignore
data.__getitem__ = getitem
return data
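def _example_namedtupledict_usage(): # type: () -> None
    # Illustrative sketch (ours): a field named "0" is an invalid identifier,
    # so namedtuple renames it, but the patched __getitem__ still resolves the
    # original string key through field_names_map.
    Outputs = namedtupledict('Outputs', ['y', '0'])
    outs = Outputs(1, 2)
    assert outs['y'] == 1 and outs[0] == 1
    assert outs['0'] == 2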
class BackendRep(object):
def run(self, inputs, **kwargs): # type: (Any, **Any) -> Tuple[Any, ...]
pass
class Backend(object):
@classmethod
def is_compatible(cls,
model, # type: ModelProto
device='CPU', # type: Text
**kwargs # type: Any
): # type: (...) -> bool
# Return whether the model is compatible with the backend.
return True
@classmethod
def prepare(cls,
model, # type: ModelProto
device='CPU', # type: Text
**kwargs # type: Any
): # type: (...) -> Optional[BackendRep]
# TODO Remove Optional from return type
onnx.checker.check_model(model)
return None
@classmethod
def run_model(cls,
model, # type: ModelProto
inputs, # type: Any
device='CPU', # type: Text
**kwargs # type: Any
): # type: (...) -> Tuple[Any, ...]
backend = cls.prepare(model, device, **kwargs)
assert backend is not None
return backend.run(inputs)
@classmethod
def run_node(cls,
node, # type: NodeProto
inputs, # type: Any
device='CPU', # type: Text
outputs_info=None, # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]
**kwargs # type: Dict[Text, Any]
): # type: (...) -> Optional[Tuple[Any, ...]]
        '''Run a single operator and return the results.
Args:
outputs_info: a list of tuples, which contains the element type and
shape of each output. First element of the tuple is the dtype, and
                the second element is the shape. More use cases can be found at
https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py
'''
# TODO Remove Optional from return type
if 'opset_version' in kwargs:
special_context = c_checker.CheckerContext()
special_context.ir_version = IR_VERSION
special_context.opset_imports = {'': kwargs['opset_version']} # type: ignore
onnx.checker.check_node(node, special_context)
else:
onnx.checker.check_node(node)
return None
@classmethod
def supports_device(cls, device): # type: (Text) -> bool
"""
Checks whether the backend is compiled with particular device support.
In particular it's used in the testing suite.
"""
return True
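class _ExampleNullBackend(Backend):
    # Illustrative sketch (ours, not part of the original file): the minimal
    # override a concrete backend provides. prepare() should return a
    # BackendRep whose run() produces the output tuple.
    @classmethod
    def prepare(cls,
                model, # type: ModelProto
                device='CPU', # type: Text
                **kwargs # type: Any
                ): # type: (...) -> Optional[BackendRep]
        super(_ExampleNullBackend, cls).prepare(model, device, **kwargs)  # checks the model
        return BackendRep()  # a real backend would compile `model` here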
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
from onnx import defs, load, AttributeProto
from onnx.backend.test.case import collect_snippets
from onnx.backend.test.runner import Runner
from onnx.backend.test.loader import load_model_tests
from typing import Any, IO, Sequence, Text, Dict, List
def is_ml(schemas): # type: (Sequence[defs.OpSchema]) -> bool
for s in schemas:
if s.domain == 'ai.onnx.ml':
return True
return False
def gen_outlines(f, ml): # type: (IO[Any], bool) -> None
f.write('# Test Coverage Report')
if ml:
f.write(' (ONNX-ML Operators)\n')
else:
f.write(' (ONNX Core Operators)\n')
f.write('## Outlines\n')
f.write('* [Node Test Coverage](#node-test-coverage)\n')
f.write('* [Model Test Coverage](#model-test-coverage)\n')
f.write('* [Overall Test Coverage](#overall-test-coverage)\n')
common_covered = [] # type: Sequence[Text]
experimental_covered = [] # type: Sequence[Text]
def gen_node_test_coverage(schemas, f, ml):
# type: (Sequence[defs.OpSchema], IO[Any], bool) -> None
global common_covered
global experimental_covered
generators = set({
'Multinomial',
'RandomNormal',
'RandomNormalLike',
'RandomUniform',
'RandomUniformLike',
})
node_tests = collect_snippets()
common_covered = sorted([s.name for s in schemas
if s.name in node_tests
and s.support_level == defs.OpSchema.SupportType.COMMON
and (s.domain == 'ai.onnx.ml') == ml])
common_no_cover = sorted([s.name for s in schemas
if s.name not in node_tests
and s.support_level == defs.OpSchema.SupportType.COMMON
and (s.domain == 'ai.onnx.ml') == ml])
common_generator = sorted([name for name in common_no_cover
if name in generators])
experimental_covered = sorted([s.name for s in schemas
if s.name in node_tests
and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL
and (s.domain == 'ai.onnx.ml') == ml])
experimental_no_cover = sorted([s.name for s in schemas
if s.name not in node_tests
and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL
and (s.domain == 'ai.onnx.ml') == ml])
experimental_generator = sorted([name for name in experimental_no_cover
if name in generators])
num_common = len(common_covered) + len(common_no_cover) \
- len(common_generator)
num_experimental = len(experimental_covered) + len(experimental_no_cover) \
- len(experimental_generator)
f.write('# Node Test Coverage\n')
f.write('## Summary\n')
if num_common:
f.write('Node tests have covered {}/{} ({:.2f}%, {} generators excluded) '
'common operators.\n\n'.format(
len(common_covered), num_common,
(len(common_covered) / float(num_common) * 100),
len(common_generator)))
else:
f.write('Node tests have covered 0/0 (N/A) common operators. \n\n')
if num_experimental:
f.write('Node tests have covered {}/{} ({:.2f}%, {} generators excluded) '
'experimental operators.\n\n'.format(
len(experimental_covered), num_experimental,
(len(experimental_covered) / float(num_experimental) * 100),
len(experimental_generator)))
else:
f.write('Node tests have covered 0/0 (N/A) experimental operators.\n\n')
titles = ['💚Covered Common Operators',
'💔No Cover Common Operators',
'💚Covered Experimental Operators',
'💔No Cover Experimental Operators',
]
all_lists = [common_covered, common_no_cover,
experimental_covered, experimental_no_cover]
for t in titles:
f.write('* [{}](#{})\n'.format(t[9:], t[9:].lower().replace(' ', '-')))
f.write('\n')
for t, l in zip(titles, all_lists):
f.write('## {}\n'.format(t))
for s in l:
f.write('### {}'.format(s))
if s in node_tests:
f.write('\nThere are {} test cases, listed as following:\n'.format(
len(node_tests[s])))
for summary, code in sorted(node_tests[s]):
f.write('<details>\n')
f.write('<summary>{}</summary>\n\n'.format(summary))
f.write('```python\n{}\n```\n\n'.format(code))
f.write('</details>\n')
else:
if s in generators:
f.write(' (random generator operator)\n')
else:
f.write(' (call for test cases)\n')
f.write('\n\n')
f.write('<br/>\n\n')
def gen_model_test_coverage(schemas, f, ml):
# type: (Sequence[defs.OpSchema], IO[Any], bool) -> None
f.write('# Model Test Coverage\n')
# Process schemas
schema_dict = dict()
for schema in schemas:
schema_dict[schema.name] = schema
# Load models from each model test using Runner.prepare_model_data
# Need to grab associated nodes
attrs = dict() # type: Dict[Text, Dict[Text, List[Any]]]
model_paths = [] # type: List[Any]
for rt in load_model_tests(kind='real'):
model_dir = Runner.prepare_model_data(rt)
model_paths.append(os.path.join(model_dir, 'model.onnx'))
model_paths.sort()
model_written = False
for model_pb_path in model_paths:
model = load(model_pb_path)
if ml:
ml_present = False
for opset in model.opset_import:
if opset.domain == 'ai.onnx.ml':
ml_present = True
if not ml_present:
continue
else:
model_written = True
f.write('## {}\n'.format(model.graph.name))
# Deconstruct model
num_covered = 0
for node in model.graph.node:
if node.op_type in common_covered or node.op_type in experimental_covered:
num_covered += 1
# Add details of which nodes are/aren't covered
# Iterate through and store each node's attributes
for attr in node.attribute:
if node.op_type not in attrs:
attrs[node.op_type] = dict()
if attr.name not in attrs[node.op_type]:
attrs[node.op_type][attr.name] = []
if attr.type == AttributeProto.FLOAT:
if attr.f not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.f)
elif attr.type == AttributeProto.INT:
if attr.i not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.i)
elif attr.type == AttributeProto.STRING:
if attr.s not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.s)
elif attr.type == AttributeProto.TENSOR:
if attr.t not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.t)
elif attr.type == AttributeProto.GRAPH:
if attr.g not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.g)
elif attr.type == AttributeProto.FLOATS:
if attr.floats not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.floats)
elif attr.type == AttributeProto.INTS:
if attr.ints not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.ints)
elif attr.type == AttributeProto.STRINGS:
if attr.strings not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.strings)
elif attr.type == AttributeProto.TENSORS:
if attr.tensors not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.tensors)
elif attr.type == AttributeProto.GRAPHS:
if attr.graphs not in attrs[node.op_type][attr.name]:
attrs[node.op_type][attr.name].append(attr.graphs)
        f.write('\n{} has {} nodes. Of these, {} are covered by node tests ({}%)\n\n\n'.format(
            model.graph.name, len(model.graph.node), num_covered,
            100.0 * float(num_covered) / float(len(model.graph.node))))
# Iterate through attrs, print
f.write('<details>\n')
f.write('<summary>nodes</summary>\n\n')
for op in sorted(attrs):
f.write('<details>\n')
# Get total number of attributes for node schema
f.write('<summary>{}: {} out of {} attributes covered</summary>\n\n'
.format(op, len(attrs[op].keys()), len(schema_dict[op]
.attributes)))
for attribute in sorted(schema_dict[op].attributes):
if attribute in attrs[op]:
f.write('{}: {}\n'.format(attribute, len(attrs[op][attribute])))
else:
f.write('{}: 0\n'.format(attribute))
f.write('</details>\n')
f.write('</details>\n\n\n')
if not model_written and ml:
f.write('No model tests present for selected domain\n')
def gen_overall_test_coverage(schemas, f, ml):
# type: (Sequence[defs.OpSchema], IO[Any], bool) -> None
f.write('# Overall Test Coverage\n')
f.write('## To be filled.\n')
def main():
# type: () -> None
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))))
docs_dir = os.path.join(base_dir, 'docs')
schemas = defs.get_all_schemas()
has_ml = is_ml(schemas)
fname = os.path.join(docs_dir, 'TestCoverage.md')
with io.open(fname, 'w+', newline='', encoding="utf-8") as f: # type: ignore
gen_outlines(f, False)
gen_node_test_coverage(schemas, f, False)
gen_model_test_coverage(schemas, f, False)
gen_overall_test_coverage(schemas, f, False)
if has_ml:
fname = os.path.join(docs_dir, 'TestCoverage-ml.md')
with io.open(fname, 'w+', newline='', encoding="utf-8") as f: # type: ignore
gen_outlines(f, True)
gen_node_test_coverage(schemas, f, True)
gen_model_test_coverage(schemas, f, True)
gen_overall_test_coverage(schemas, f, True)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# for backward compatibility
from .runner import Runner as BackendTest # noqa
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import os
import shutil
import onnx.backend.test.case.node as node_test
import onnx.backend.test.case.model as model_test
from onnx import numpy_helper
from typing import Text
TOP_DIR = os.path.realpath(os.path.dirname(__file__))
DATA_DIR = os.path.join(TOP_DIR, 'data')
def generate_data(args): # type: (argparse.Namespace) -> None
def prepare_dir(path): # type: (Text) -> None
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
cases = model_test.collect_testcases() + node_test.collect_testcases()
for case in cases:
output_dir = os.path.join(
args.output, case.kind, case.name)
prepare_dir(output_dir)
if case.kind == 'real':
with open(os.path.join(output_dir, 'data.json'), 'w') as fi:
json.dump({
'url': case.url,
'model_name': case.model_name,
'rtol': case.rtol,
'atol': case.atol,
}, fi, sort_keys=True)
else:
with open(os.path.join(output_dir, 'model.onnx'), 'wb') as f:
f.write(case.model.SerializeToString())
for i, (inputs, outputs) in enumerate(case.data_sets):
data_set_dir = os.path.join(
output_dir, 'test_data_set_{}'.format(i))
prepare_dir(data_set_dir)
for j, input_np in enumerate(inputs):
tensor = numpy_helper.from_array(
input_np, case.model.graph.input[j].name)
with open(os.path.join(
data_set_dir, 'input_{}.pb'.format(j)), 'wb') as f:
f.write(tensor.SerializeToString())
for j, output_np in enumerate(outputs):
tensor = numpy_helper.from_array(
output_np, case.model.graph.output[j].name)
with open(os.path.join(
data_set_dir, 'output_{}.pb'.format(j)), 'wb') as f:
f.write(tensor.SerializeToString())
def parse_args(): # type: () -> argparse.Namespace
parser = argparse.ArgumentParser('backend-test-tools')
subparsers = parser.add_subparsers()
subparser = subparsers.add_parser('generate-data', help='convert testcases to test data')
subparser.add_argument('-o', '--output', default=DATA_DIR,
help='output directory (default: %(default)s)')
subparser.set_defaults(func=generate_data)
return parser.parse_args()
def main(): # type: () -> None
args = parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import functools
import glob
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import unittest
import numpy as np # type: ignore
import onnx
from onnx import helper, numpy_helper, NodeProto, ModelProto
from onnx.backend.base import Backend
from six.moves.urllib.request import urlretrieve
from ..loader import load_model_tests
from ..case.test_case import TestCase
from .item import TestItem
from typing import Optional, Pattern, Set, Dict, Text, Type, Sequence, Any, Callable, Union, Iterable, List
class BackendIsNotSupposedToImplementIt(unittest.SkipTest):
pass
def retry_execute(times): # type: (int) -> Callable[[Callable[..., Any]], Callable[..., Any]]
assert times >= 1
def wrapper(func): # type: (Callable[..., Any]) -> Callable[..., Any]
@functools.wraps(func)
def wrapped(*args, **kwargs): # type: (*Any, **Any) -> Any
for i in range(1, times + 1):
try:
return func(*args, **kwargs)
except Exception:
                    print('Attempt {} of {} failed'.format(i, times))
if i == times:
raise
time.sleep(5 * i)
return wrapped
return wrapper
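def _example_retry_usage(): # type: () -> None
    # Illustrative sketch (ours): wrap a flaky callable so it is retried up
    # to 3 times, sleeping 5 * attempt seconds between failed attempts.
    @retry_execute(3)
    def fetch(): # type: () -> int
        return 42
    assert fetch() == 42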
class Runner(object):
def __init__(self, backend, parent_module=None): # type: (Type[Backend], Optional[str]) -> None
self.backend = backend
self._parent_module = parent_module
self._include_patterns = set() # type: Set[Pattern[Text]]
self._exclude_patterns = set() # type: Set[Pattern[Text]]
        # This is the source of truth for all test functions.
# Properties `test_cases`, `test_suite` and `tests` will be
# derived from it.
# {category: {name: func}}
self._test_items = defaultdict(dict) # type: Dict[Text, Dict[Text, TestItem]]
for rt in load_model_tests(kind='node'):
self._add_model_test(rt, 'Node')
for rt in load_model_tests(kind='real'):
self._add_model_test(rt, 'Real')
for rt in load_model_tests(kind='simple'):
self._add_model_test(rt, 'Simple')
for ct in load_model_tests(kind='pytorch-converted'):
self._add_model_test(ct, 'PyTorchConverted')
for ot in load_model_tests(kind='pytorch-operator'):
self._add_model_test(ot, 'PyTorchOperator')
def _get_test_case(self, name): # type: (Text) -> Type[unittest.TestCase]
test_case = type(str(name), (unittest.TestCase,), {})
if self._parent_module:
test_case.__module__ = self._parent_module
return test_case
def include(self, pattern): # type: (Text) -> Runner
self._include_patterns.add(re.compile(pattern))
return self
def exclude(self, pattern): # type: (Text) -> Runner
self._exclude_patterns.add(re.compile(pattern))
return self
def enable_report(self): # type: () -> Runner
import pytest # type: ignore
for category, items_map in self._test_items.items():
for name, item in items_map.items():
item.func = pytest.mark.onnx_coverage(item.proto, category)(item.func)
return self
@property
def _filtered_test_items(self): # type: () -> Dict[Text, Dict[Text, TestItem]]
filtered = {} # type: Dict[Text, Dict[Text, TestItem]]
for category, items_map in self._test_items.items():
filtered[category] = {}
for name, item in items_map.items():
if (self._include_patterns
and (not any(include.search(name)
for include in self._include_patterns))):
item.func = unittest.skip(
'no matched include pattern'
)(item.func)
for exclude in self._exclude_patterns:
if exclude.search(name):
item.func = unittest.skip(
'matched exclude pattern "{}"'.format(
exclude.pattern)
)(item.func)
filtered[category][name] = item
return filtered
@property
def test_cases(self): # type: () -> Dict[str, Type[unittest.TestCase]]
'''
List of test cases to be applied on the parent scope
Example usage:
globals().update(BackendTest(backend).test_cases)
'''
test_cases = {}
for category, items_map in self._filtered_test_items.items():
test_case_name = str('OnnxBackend{}Test').format(category)
test_case = self._get_test_case(test_case_name)
for name, item in sorted(items_map.items()):
setattr(test_case, name, item.func)
test_cases[test_case_name] = test_case
return test_cases
@property
def test_suite(self): # type: () -> unittest.TestSuite
'''
TestSuite that can be run by TestRunner
Example usage:
unittest.TextTestRunner().run(BackendTest(backend).test_suite)
'''
suite = unittest.TestSuite()
for case in sorted(self.test_cases.values()):
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case))
return suite
# For backward compatibility (we used to expose `.tests`)
@property
def tests(self): # type: () -> Type[unittest.TestCase]
'''
One single unittest.TestCase that hosts all the test functions
Example usage:
onnx_backend_tests = BackendTest(backend).tests
'''
tests = self._get_test_case('OnnxBackendTest')
for items_map in sorted(self._filtered_test_items.values()):
for name, item in sorted(items_map.items()):
setattr(tests, name, item.func)
return tests
@classmethod
def assert_similar_outputs(cls, ref_outputs, outputs, rtol, atol): # type: (Sequence[Any], Sequence[Any], float, float) -> None
np.testing.assert_equal(len(ref_outputs), len(outputs))
for i in range(len(outputs)):
np.testing.assert_equal(ref_outputs[i].dtype, outputs[i].dtype)
if ref_outputs[i].dtype == np.object:
np.testing.assert_array_equal(ref_outputs[i], outputs[i])
else:
np.testing.assert_allclose(
ref_outputs[i],
outputs[i],
rtol=rtol,
atol=atol)
@classmethod
    @retry_execute(3)
def download_model(cls, model_test, model_dir, models_dir): # type: (TestCase, Text, Text) -> None
        # On Windows, a NamedTemporaryFile cannot be opened a second time
        # while it is still open, so close it before downloading into it
download_file = tempfile.NamedTemporaryFile(delete=False)
try:
download_file.close()
print('Start downloading model {} from {}'.format(
model_test.model_name,
model_test.url))
urlretrieve(model_test.url, download_file.name)
print('Done')
with tarfile.open(download_file.name) as t:
t.extractall(models_dir)
except Exception as e:
print('Failed to prepare data for model {}: {}'.format(
model_test.model_name, e))
raise
finally:
os.remove(download_file.name)
@classmethod
def prepare_model_data(cls, model_test): # type: (TestCase) -> Text
onnx_home = os.path.expanduser(os.getenv('ONNX_HOME', os.path.join('~', '.onnx')))
models_dir = os.getenv('ONNX_MODELS',
os.path.join(onnx_home, 'models'))
model_dir = os.path.join(models_dir, model_test.model_name) # type: Text
if not os.path.exists(os.path.join(model_dir, 'model.onnx')):
if os.path.exists(model_dir):
bi = 0
while True:
dest = '{}.old.{}'.format(model_dir, bi)
if os.path.exists(dest):
bi += 1
continue
shutil.move(model_dir, dest)
break
os.makedirs(model_dir)
cls.download_model(model_test=model_test, model_dir=model_dir, models_dir=models_dir)
return model_dir
def _add_test(self,
category, # type: Text
test_name, # type: Text
test_func, # type: Callable[..., Any]
report_item, # type: List[Optional[Union[ModelProto, NodeProto]]]
devices=('CPU', 'CUDA'), # type: Iterable[Text]
): # type: (...) -> None
        # We require callers to supply the 'test_' prefix themselves rather
        # than prepending it here, which keeps test names greppable
if not test_name.startswith('test_'):
raise ValueError(
'Test name must start with test_: {}'.format(test_name))
def add_device_test(device): # type: (Text) -> None
device_test_name = '{}_{}'.format(test_name, device.lower())
if device_test_name in self._test_items[category]:
raise ValueError(
'Duplicated test name "{}" in category "{}"'.format(
device_test_name, category))
@unittest.skipIf( # type: ignore
not self.backend.supports_device(device),
"Backend doesn't support device {}".format(device))
@functools.wraps(test_func)
def device_test_func(*args, **kwargs): # type: (*Any, **Any) -> Any
try:
return test_func(*args, device=device, **kwargs)
except BackendIsNotSupposedToImplementIt as e:
# hacky verbose reporting
if '-v' in sys.argv or '--verbose' in sys.argv:
print('Test {} is effectively skipped: {}'.format(
device_test_name, e))
self._test_items[category][device_test_name] = TestItem(
device_test_func, report_item)
for device in devices:
add_device_test(device)
def _add_model_test(self, model_test, kind): # type: (TestCase, Text) -> None
        # The model is loaded at runtime; it may never be loaded at all
        # if the test is skipped
model_marker = [None] # type: List[Optional[Union[ModelProto, NodeProto]]]
def run(test_self, device): # type: (Any, Text) -> None
if model_test.model_dir is None:
model_dir = self.prepare_model_data(model_test)
else:
model_dir = model_test.model_dir
model_pb_path = os.path.join(model_dir, 'model.onnx')
model = onnx.load(model_pb_path)
model_marker[0] = model
if hasattr(self.backend, 'is_compatible') \
and callable(self.backend.is_compatible) \
and not self.backend.is_compatible(model):
raise unittest.SkipTest('Not compatible with backend')
prepared_model = self.backend.prepare(model, device)
assert prepared_model is not None
# TODO after converting all npz files to protobuf, we can delete this.
for test_data_npz in glob.glob(
os.path.join(model_dir, 'test_data_*.npz')):
test_data = np.load(test_data_npz, encoding='bytes')
inputs = list(test_data['inputs'])
outputs = list(prepared_model.run(inputs))
ref_outputs = test_data['outputs']
self.assert_similar_outputs(ref_outputs, outputs,
rtol=model_test.rtol,
atol=model_test.atol)
for test_data_dir in glob.glob(
os.path.join(model_dir, "test_data_set*")):
inputs = []
inputs_num = len(glob.glob(os.path.join(test_data_dir, 'input_*.pb')))
for i in range(inputs_num):
input_file = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, 'output_*.pb')))
for i in range(ref_outputs_num):
output_file = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
outputs = list(prepared_model.run(inputs))
self.assert_similar_outputs(ref_outputs, outputs,
rtol=model_test.rtol,
atol=model_test.atol)
self._add_test(kind + 'Model', model_test.name, run, model_marker)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from typing import Callable, Any, Union, List, Optional
from onnx import NodeProto, ModelProto
# A container that hosts the test function and the associated
# test item (ModelProto)
class TestItem(object):
def __init__(self, func, proto): # type: (Callable[..., Any], List[Optional[Union[ModelProto, NodeProto]]]) -> None
self.func = func
self.proto = proto
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
from ..case.test_case import TestCase
from typing import List, Text, Optional
DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(os.path.dirname(__file__))),
'data')
def load_model_tests(
data_dir=DATA_DIR, # type: Text
kind=None, # type: Optional[Text]
): # type: (...) -> List[TestCase]
'''Load model test cases from on-disk data files.
'''
supported_kinds = os.listdir(data_dir)
if kind not in supported_kinds:
raise ValueError("kind must be one of {}".format(supported_kinds))
testcases = []
kind_dir = os.path.join(data_dir, kind)
for test_name in os.listdir(kind_dir):
case_dir = os.path.join(kind_dir, test_name)
# skip the non-dir files, such as generated __init__.py.
rtol = 1e-3
atol = 1e-7
if not os.path.isdir(case_dir):
continue
if os.path.exists(os.path.join(case_dir, 'model.onnx')):
url = None
            model_name = test_name[len('test_'):]
model_dir = case_dir # type: Optional[Text]
else:
with open(os.path.join(case_dir, 'data.json')) as f:
data = json.load(f)
url = data['url']
model_name = data['model_name']
rtol = data.get('rtol', 1e-3)
atol = data.get('atol', 1e-7)
model_dir = None
testcases.append(
TestCase(
name=test_name,
url=url,
model_name=model_name,
model_dir=model_dir,
model=None,
data_sets=None,
kind=kind,
rtol=rtol,
atol=atol,
))
return testcases
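# Illustrative usage (ours): enumerate the on-disk node test cases.
#   for case in load_model_tests(kind='node'):
#       print(case.name, case.model_dir)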
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from .base import Snippets
from .utils import import_recursive
from typing import Dict, Text, List, Tuple
def collect_snippets(): # type: () -> Dict[Text, List[Tuple[Text, Text]]]
import_recursive(sys.modules[__name__])
return Snippets
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
TestCase = namedtuple('TestCase', [
'name',
'model_name',
'url',
'model_dir',
'model',
'data_sets',
'kind',
'rtol',
'atol',
])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import pkgutil
from types import ModuleType
from typing import Optional, List
import numpy as np # type: ignore
all_numeric_dtypes = [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
]
def import_recursive(package): # type: (ModuleType) -> None
"""
Takes a package and imports all modules underneath it
"""
pkg_dir = None # type: Optional[List[str]]
pkg_dir = package.__path__ # type: ignore
module_location = package.__name__
for (_module_loader, name, ispkg) in pkgutil.iter_modules(pkg_dir):
module_name = "{}.{}".format(module_location, name) # Module/package
module = importlib.import_module(module_name)
if ispkg:
import_recursive(module)
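# Illustrative usage (ours): pull in every module under the node test-case
# package so that its import-time side effects (snippet registration) run.
#   import onnx.backend.test.case.node as node_cases
#   import_recursive(node_cases)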
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import inspect
from textwrap import dedent
from typing import Dict, Text, List, Tuple, Type, Sequence, Any
import numpy as np # type: ignore
from six import add_metaclass
def process_snippet(op_name, name, export): # type: (Text, Text, Any) -> Tuple[Text, Text]
snippet_name = name[len('export_'):] or op_name.lower()
source_code = dedent(inspect.getsource(export))
# remove the function signature line
lines = source_code.splitlines()
assert lines[0] == '@staticmethod'
assert lines[1].startswith('def export')
return snippet_name, dedent("\n".join(lines[2:]))
Snippets = defaultdict(list) # type: Dict[Text, List[Tuple[Text, Text]]]
class _Exporter(type):
exports = defaultdict(list) # type: Dict[Text, List[Tuple[Text, Text]]]
def __init__(cls, name, bases, dct): # type: (str, Tuple[Type[Any], ...], Dict[str, Any]) -> None
for k, v in dct.items():
if k.startswith('export'):
if not isinstance(v, staticmethod):
raise ValueError(
'Only staticmethods could be named as export.*')
export = getattr(cls, k)
Snippets[name].append(process_snippet(name, k, export))
# export functions should call expect and so populate
# TestCases
np.random.seed(seed=0)
export()
super(_Exporter, cls).__init__(name, bases, dct)
@add_metaclass(_Exporter)
class Base(object):
pass
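# Illustrative sketch (ours, not a real test case): subclassing Base triggers
# _Exporter, which records the source of every export* staticmethod in
# Snippets and then calls it, so its expect(...) calls register test cases.
#
#   class Identity(Base):
#       @staticmethod
#       def export():  # type: () -> None
#           ...  # build a model and call expect(model, inputs, outputs, name)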
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class ShrinkTest(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Shrink', ['x'], ['y'], lambd=1.5, bias=1.5,)
graph = onnx.helper.make_graph(
nodes=[node],
name='Shrink',
inputs=[onnx.helper.make_tensor_value_info(
'x', onnx.TensorProto.FLOAT, [5])],
outputs=[onnx.helper.make_tensor_value_info(
'y', onnx.TensorProto.FLOAT, [5])])
model = onnx.helper.make_model(graph,
producer_name='backend-test')
x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)
y = np.array([-0.5, 0.0, 0.0, 0.0, 0.5], dtype=np.float32)
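        # Shrink maps x to x + bias where x < -lambd, x - bias where
        # x > lambd, and 0 otherwise; with lambd = bias = 1.5 this sends
        # [-2, -1, 0, 1, 2] to [-0.5, 0, 0, 0, 0.5].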
expect(model, inputs=[x], outputs=[y],
name='test_shrink')
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
from typing import Sequence
class NormalizeStrings(Base):
@staticmethod
def export(): # type: () -> None
        def make_graph(node, input_shape, output_shape): # type: (onnx.NodeProto, Sequence[int], Sequence[int]) -> onnx.GraphProto
graph = onnx.helper.make_graph(
nodes=[node],
name='StringNormalizer',
inputs=[onnx.helper.make_tensor_value_info('x',
onnx.TensorProto.STRING,
input_shape)],
outputs=[onnx.helper.make_tensor_value_info('y',
onnx.TensorProto.STRING,
output_shape)])
return graph
        # 1st model_monday_casesensintive_nochangecase
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
is_case_sensitive=1,
stopwords=stopwords
)
x = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
y = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_casesensintive_nochangecase")
        # 2nd model_nostopwords_nochangecase
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
is_case_sensitive=1
)
x = np.array([u'monday', u'tuesday']).astype(np.object)
y = x
graph = make_graph(node, [2], [2])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_nostopwords_nochangecase")
# 3rd model_monday_casesensintive_lower
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='LOWER',
is_case_sensitive=1,
stopwords=stopwords
)
x = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
y = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_casesensintive_lower")
        # 4th model_monday_casesensintive_upper
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
is_case_sensitive=1,
stopwords=stopwords
)
x = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
y = np.array([u'TUESDAY', u'WEDNESDAY', u'THURSDAY']).astype(np.object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_casesensintive_upper")
        # 5th monday_insensintive_upper_twodim
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
stopwords=stopwords
)
input_shape = [1, 6]
output_shape = [1, 4]
x = np.array([u'Monday', u'tuesday', u'wednesday', u'Monday', u'tuesday', u'wednesday']).astype(np.object).reshape(input_shape)
y = np.array([u'TUESDAY', u'WEDNESDAY', u'TUESDAY', u'WEDNESDAY']).astype(np.object).reshape(output_shape)
graph = make_graph(node, input_shape, output_shape)
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_insensintive_upper_twodim")
        # 6th monday_empty_output
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
is_case_sensitive=0,
stopwords=stopwords
)
x = np.array([u'monday', u'monday']).astype(np.object)
y = np.array([u'']).astype(np.object)
graph = make_graph(node, [2], [1])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_empty_output")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Gradient(Base):
@staticmethod
def export_gradient_scalar_add(): # type: () -> None
add_node = onnx.helper.make_node('Add',
['a', 'b'], ['c'], name='my_add')
gradient_node = onnx.helper.make_node(
'Gradient', ['a', 'b'],
['dc_da', 'dc_db'], name='my_gradient',
domain='ai.onnx.training',
xs=['a', 'b'], y='c')
a = np.array(1.0).astype(np.float32)
b = np.array(2.0).astype(np.float32)
c = a + b
# dc / da = d(a+b) / da = 1
dc_da = np.array(1).astype(np.float32)
        # dc / db = d(a+b) / db = 1
dc_db = np.array(1).astype(np.float32)
graph = onnx.helper.make_graph(
nodes=[add_node, gradient_node],
name='GradientOfAdd',
inputs=[
onnx.helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,
[]),
onnx.helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,
[])],
outputs=[
onnx.helper.make_tensor_value_info('c', onnx.TensorProto.FLOAT,
[]),
onnx.helper.make_tensor_value_info('dc_da',
onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info('dc_db',
onnx.TensorProto.FLOAT, [])])
opsets = [
onnx.helper.make_operatorsetid('', 12),
onnx.helper.make_operatorsetid('ai.onnx.training', 1)]
model = onnx.helper.make_model(
graph,
producer_name='backend-test',
opset_imports=opsets)
expect(model, inputs=[a, b], outputs=[c, dc_da, dc_db],
name='test_gradient_of_add')
@staticmethod
def export_gradient_scalar_add_and_mul(): # type: () -> None
add_node = onnx.helper.make_node('Add',
['a', 'b'], ['c'], name='my_add')
mul_node = onnx.helper.make_node('Mul',
['c', 'a'], ['d'], name='my_mul')
gradient_node = onnx.helper.make_node(
'Gradient', ['a', 'b'],
['dd_da', 'dd_db'], name='my_gradient',
domain='ai.onnx.training',
xs=['a', 'b'], y='d')
a = np.array(1.0).astype(np.float32)
b = np.array(2.0).astype(np.float32)
c = a + b
# d = a * c = a * (a + b)
d = a * c
# dd / da = d(a*a+a*b) / da = 2 * a + b
dd_da = (2 * a + b).astype(np.float32)
# dd / db = d(a*a+a*b) / db = a
dd_db = a
graph = onnx.helper.make_graph(
nodes=[add_node, mul_node, gradient_node],
name='GradientOfTwoOperators',
inputs=[
onnx.helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,
[]),
onnx.helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,
[])],
outputs=[
onnx.helper.make_tensor_value_info('d', onnx.TensorProto.FLOAT,
[]),
onnx.helper.make_tensor_value_info('dd_da',
onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info('dd_db',
onnx.TensorProto.FLOAT, [])])
opsets = [
onnx.helper.make_operatorsetid('', 12),
onnx.helper.make_operatorsetid('ai.onnx.training', 1)]
model = onnx.helper.make_model(graph,
producer_name='backend-test',
opset_imports=opsets)
expect(model, inputs=[a, b], outputs=[d, dd_da, dd_db],
name='test_gradient_of_add_and_mul')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
import typing
from ..base import Base
from . import expect
from onnx import TensorProto
from typing import List, Optional, Text, Union
def SequenceEmptyImpl(): # type: () -> List[Optional[np.ndarray]]
return []
def SequenceConstructImpl(*tensors): # type: (*np.ndarray) -> List[np.ndarray]
return list(tensors)
def SequenceInsertImpl(sequence, tensor, position=None):
# type: (List[np.ndarray], np.ndarray, Optional[int]) -> List[np.ndarray]
if position is None:
position = len(sequence)
sequence.insert(position, tensor)
return sequence
def SequenceAtImpl(sequence, position):
# type: (List[np.ndarray], int) -> np.ndarray
return sequence[position]
def SequenceEraseImpl(sequence, position=None):
# type: (List[np.ndarray], Optional[int]) -> List[Optional[np.ndarray]]
if position is None:
position = -1
del sequence[position]
return sequence
def SequenceLengthImpl(sequence):
# type: (List[np.ndarray]) -> np.int64
return np.int64(len(sequence))
def SplitToSequenceImpl(tensor, split=None, axis=0, keepdims=1):
# type: (np.ndarray, Optional[Union[int, List[int]]], int, int) -> List[np.ndarray]
dim_size = tensor.shape[axis]
if split is None:
split = 1
split_indices = [i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size]
if not keepdims:
results = np.array_split(tensor, split_indices, axis)
return [np.squeeze(res, axis) for res in results]
if np.isscalar(split):
split_indices = [i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size] # type: ignore
else:
split_indices = np.cumsum(split) + 1
return np.array_split(tensor, split_indices, axis) # type: ignore
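def _example_split_to_sequence(): # type: () -> None
    # Illustrative check (ours): with split=None the tensor is sliced into
    # size-1 chunks along `axis`; keepdims=0 additionally squeezes that axis.
    t = np.arange(8).reshape(4, 2)
    chunks = SplitToSequenceImpl(t)
    assert len(chunks) == 4 and chunks[0].shape == (1, 2)
    flat = SplitToSequenceImpl(t, keepdims=0)
    assert flat[0].shape == (2,)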
def ConcatFromSequenceImpl(sequence, axis, new_axis=0):
# type: (List[np.ndarray], int, Optional[int]) -> np.ndarray
if not new_axis:
return np.concatenate(sequence, axis)
else:
return np.stack(sequence, axis)
class Sequence(Base):
@staticmethod
def export(): # type: () -> None
def make_graph(
                nodes, # type: List[onnx.NodeProto]
input_shapes, # type: List[Optional[typing.Sequence[Union[Text, int]]]]
output_shapes, # type: List[Optional[typing.Sequence[Union[Text, int]]]]
input_names, # type: List[Text]
output_names, # type: List[Text]
input_types, # type: List[TensorProto.DataType]
output_types, # type: List[TensorProto.DataType]
initializers=None # type: Optional[List[TensorProto]]
        ): # type: (...) -> onnx.GraphProto
graph = onnx.helper.make_graph(
nodes=nodes,
name='Sequence',
inputs=[
onnx.helper.make_tensor_value_info(
name,
input_type,
input_shape)
for name, input_type, input_shape in zip(input_names, input_types, input_shapes)],
outputs=[
onnx.helper.make_tensor_value_info(
name,
output_type,
output_shape)
for name, output_type, output_shape in zip(output_names, output_types, output_shapes)],
initializer=initializers)
return graph
        # 1st testcase - insert and at.
# 1. SequenceEmpty: -> []
# 2. SequenceInsert(x): -> [x]
# 3. SequenceInsert(y): -> [x, y]
# 4. SequenceInsert(z, 1): -> [x, z, y]
# 5. SequenceAt(2): -> y
seq_empty_node = onnx.helper.make_node('SequenceEmpty', [], ['Seq_empty'])
seq_insert_node = onnx.helper.make_node('SequenceInsert', ['Seq_empty', 'X'], ['Seq_1'])
seq_insert_node2 = onnx.helper.make_node('SequenceInsert', ['Seq_1', 'Y'], ['Seq_2'])
seq_insert_node3 = onnx.helper.make_node('SequenceInsert', ['Seq_2', 'Z', 'pos'], ['Seq_3'])
seq_at_node = onnx.helper.make_node('SequenceAt', ['Seq_3', 'pos_at'], ['out'])
x_shape = [2, 3, 4]
y_shape = [1, 3, 4]
z_shape = [3, 3, 4]
out_shape = [None, 3, 4]
x = np.ones(x_shape, dtype=np.float32)
y = np.zeros(y_shape, dtype=np.float32)
z = np.ones(z_shape, dtype=np.float32) * 2
pos_val = 1
pos_at_val = 2
out = SequenceEmptyImpl()
out = SequenceInsertImpl(out, x)
out = SequenceInsertImpl(out, y)
out = SequenceInsertImpl(out, z, pos_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, y)
pos = onnx.helper.make_tensor('pos', TensorProto.INT64, (), (pos_val, ))
pos_at = onnx.helper.make_tensor('pos_at', TensorProto.INT64, (), (pos_at_val, ))
graph = make_graph(
[seq_empty_node, seq_insert_node, seq_insert_node2, seq_insert_node3, seq_at_node],
[x_shape, y_shape, z_shape, [], []], # type: ignore
[out_shape], # type: ignore
['X', 'Y', 'Z', 'pos', 'pos_at'],
['out'],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore
[onnx.TensorProto.FLOAT],
[pos, pos_at])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model1")
# 2nd testcase - erase and at.
# 1. SequenceConstruct(x, y, z): -> [x, y, z]
# 2. SequenceErase(1): -> [x, z]
# 3. SequenceAt(1): -> z
seq_construct_node = onnx.helper.make_node('SequenceConstruct', ['X', 'Y', 'Z'], ['seq_1'])
seq_erase_node = onnx.helper.make_node('SequenceErase', ['seq_1', 'pos_erase'], ['seq_2'])
seq_at_node = onnx.helper.make_node('SequenceAt', ['seq_2', 'pos_at'], ['out'])
tensor_shape = [2, 3, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
pos_erase_val = 1
pos_at_val = 1
out = SequenceConstructImpl(x, y, z)
out = SequenceEraseImpl(out, pos_erase_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, z)
pos_erase = onnx.helper.make_tensor('pos_erase', TensorProto.INT64, (), (pos_erase_val, ))
pos_at = onnx.helper.make_tensor('pos_at', TensorProto.INT64, (), (pos_at_val, ))
graph = make_graph(
[seq_construct_node, seq_erase_node, seq_at_node],
[tensor_shape, tensor_shape, tensor_shape, [], []], # type: ignore
[tensor_shape], # type: ignore
['X', 'Y', 'Z', 'pos_erase', 'pos_at'],
['out'],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore
[onnx.TensorProto.FLOAT],
[pos_erase, pos_at])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model2")
# 3rd testcase - erase, insert and at, with negative index values.
# 1. SequenceConstruct(x, y, z): -> [x, y, z]
# 2. SequenceErase(-3): -> [y, z]
# 3. SequenceInsert(x, -1): -> [y, x, z]
# 4. SequenceAt(-1): -> z
seq_construct_node = onnx.helper.make_node('SequenceConstruct', ['X', 'Y', 'Z'], ['seq_1'])
seq_erase_node = onnx.helper.make_node('SequenceErase', ['seq_1', 'pos_erase'], ['seq_2'])
seq_insert_node = onnx.helper.make_node('SequenceInsert', ['seq_2', 'X', 'pos_insert'], ['seq_3'])
seq_at_node = onnx.helper.make_node('SequenceAt', ['seq_3', 'pos_at'], ['out'])
tensor_shape = [2, 3, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
pos_erase_val = -3
pos_insert_val = -1
pos_at_val = -1
out = SequenceConstructImpl(x, y, z)
out = SequenceEraseImpl(out, pos_erase_val)
out = SequenceInsertImpl(out, x, pos_insert_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, z)
pos_erase = onnx.helper.make_tensor('pos_erase', TensorProto.INT64, (), (pos_erase_val, ))
pos_insert = onnx.helper.make_tensor('pos_insert', TensorProto.INT64, (), (pos_insert_val, ))
pos_at = onnx.helper.make_tensor('pos_at', TensorProto.INT64, (), (pos_at_val, ))
graph = make_graph(
[seq_construct_node, seq_erase_node, seq_insert_node, seq_at_node],
[tensor_shape, tensor_shape, tensor_shape, [], [], []], # type: ignore
[tensor_shape], # type: ignore
['X', 'Y', 'Z', 'pos_erase', 'pos_insert', 'pos_at'],
['out'],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 3, # type: ignore
[onnx.TensorProto.FLOAT],
[pos_erase, pos_insert, pos_at])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model3")
# 4th testcase - concat
seq_construct_node = onnx.helper.make_node('SequenceConstruct', ['X', 'Y', 'Z'], ['seq_1'])
seq_concat_node = onnx.helper.make_node('ConcatFromSequence', ['seq_1'], ['out'], axis=1)
tensor_shape = [2, 3, 4]
concat_out_shape = [2, None, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
out = SequenceConstructImpl(x, y, z)
concat_out = ConcatFromSequenceImpl(out, 1)
graph = make_graph(
[seq_construct_node, seq_concat_node],
[tensor_shape] * 3, # type: ignore
[concat_out_shape], # type: ignore
['X', 'Y', 'Z'],
['out'],
[onnx.TensorProto.FLOAT] * 3, # type: ignore
[onnx.TensorProto.FLOAT])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model4")
# 5th testcase - concat with new_axis = 1
seq_construct_node = onnx.helper.make_node('SequenceConstruct', ['X', 'Y', 'Z'], ['seq_1'])
seq_concat_node = onnx.helper.make_node('ConcatFromSequence', ['seq_1'], ['out'], axis=-1, new_axis=1)
tensor_shape = [2, 3, 4]
concat_out_shape = [2, 3, 4, 3]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
out = SequenceConstructImpl(x, y, z)
concat_out = ConcatFromSequenceImpl(out, -1, 1)
graph = make_graph(
[seq_construct_node, seq_concat_node],
[tensor_shape] * 3, # type: ignore
[concat_out_shape], # type: ignore
['X', 'Y', 'Z'],
['out'],
[onnx.TensorProto.FLOAT] * 3, # type: ignore
[onnx.TensorProto.FLOAT],)
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model5")
# 6th testcase - split and len
seq_split_node = onnx.helper.make_node('SplitToSequence', ['X'], ['seq_1'], axis=-1)
seq_len_node = onnx.helper.make_node('SequenceLength', ['seq_1'], ['len'])
tensor_shape = [2, 3, 4]
len_shape = [] # type: ignore
x = np.ones(tensor_shape, dtype=np.float32)
out = SplitToSequenceImpl(x, axis=-1)
out = SequenceLengthImpl(out)
assert np.array_equal(out, np.int64(4))
graph = onnx.helper.make_graph(
nodes=[seq_split_node, seq_len_node],
name='Sequence',
inputs=[
onnx.helper.make_tensor_value_info(
'X',
onnx.TensorProto.FLOAT,
tensor_shape)],
outputs=[
onnx.helper.make_tensor_value_info(
'len',
onnx.TensorProto.INT64,
len_shape)]) # type: ignore
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[out], name="test_sequence_model6")
# 7th testcase - split with keepdims=0, and SequenceAt
seq_split_node = onnx.helper.make_node('SplitToSequence', ['X'], ['seq_1'], axis=0, keepdims=0)
seq_at_node = onnx.helper.make_node('SequenceAt', ['seq_1', 'pos_at'], ['out'])
tensor_shape = [2, 3, 4]
out_shape = [3, 4]
x = np.random.rand(*tensor_shape)
pos_at_val = 1
out = SplitToSequenceImpl(x, axis=0, keepdims=0)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, x[pos_at_val])
pos_at = onnx.helper.make_tensor('pos_at', TensorProto.INT64, (), (pos_at_val, ))
graph = make_graph(
[seq_split_node, seq_at_node],
[tensor_shape, []], # type: ignore
[out_shape], # type: ignore
['X', 'pos_at'],
['out'],
[onnx.TensorProto.DOUBLE, onnx.TensorProto.INT64],
[onnx.TensorProto.DOUBLE],
[pos_at])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[out], name="test_sequence_model7")
# 8th testcase - split into zero-length chunks
seq_split_node = onnx.helper.make_node('SplitToSequence', ['X', 'Splits'], ['seq_1'])
seq_len_node = onnx.helper.make_node('SequenceLength', ['seq_1'], ['len'])
tensor_shape = ['n'] # type: ignore
splits_shape = [3] # type: ignore
x = np.array([]).astype(np.float32)
splits = np.array([0, 0, 0]).astype(np.int64)
out_len = np.int64(3)
graph = onnx.helper.make_graph(
nodes=[seq_split_node, seq_len_node],
name='Sequence',
inputs=[
onnx.helper.make_tensor_value_info(
'X',
onnx.TensorProto.FLOAT,
tensor_shape), # type: ignore
onnx.helper.make_tensor_value_info(
'Splits',
onnx.TensorProto.INT64,
splits_shape)], # type: ignore
outputs=[
onnx.helper.make_tensor_value_info(
'len',
onnx.TensorProto.INT64,
len_shape)]) # type: ignore
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, splits], outputs=[out_len], name="test_sequence_model8")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import onnx.defs
import numpy as np # type: ignore
from onnx import ModelProto
from typing import List, Optional, Text, Sequence
from ..utils import import_recursive
from ..test_case import TestCase
_SimpleModelTestCases = []
def expect(model, # type: ModelProto
inputs, # type: Sequence[np.ndarray]
outputs, # type: Sequence[np.ndarray]
name=None, # type: Optional[Text]
): # type: (...) -> None
name = name or model.graph.name
_SimpleModelTestCases.append(
TestCase(
name=name,
model_name=model.graph.name,
url=None,
model_dir=None,
model=model,
data_sets=[(inputs, outputs)],
kind='simple',
rtol=1e-3,
atol=1e-7,
))
base_model_opset_version = 10
BASE_URL = 'https://s3.amazonaws.com/download.onnx/models/opset_{}'.format(
base_model_opset_version)
def collect_testcases(): # type: () -> List[TestCase]
'''Collect model test cases defined in Python/NumPy code and in the model zoo.
'''
real_model_testcases = []
model_tests = [
('test_bvlc_alexnet', 'bvlc_alexnet', 1e-3, 1e-7),
('test_densenet121', 'densenet121', 2e-3, 1e-7),
('test_inception_v1', 'inception_v1', 1e-3, 1e-7),
('test_inception_v2', 'inception_v2', 1e-3, 1e-7),
('test_resnet50', 'resnet50', 1e-3, 1e-7),
('test_shufflenet', 'shufflenet', 1e-3, 1e-7),
('test_squeezenet', 'squeezenet', 1e-3, 1e-7),
('test_vgg19', 'vgg19', 1e-3, 1e-7),
('test_zfnet512', 'zfnet512', 1e-3, 1e-7),
]
for test_name, model_name, rtol, atol in model_tests:
url = '{}/{}.tar.gz'.format(BASE_URL, model_name)
real_model_testcases.append(TestCase(
name=test_name,
model_name=model_name,
url=url,
model_dir=None,
model=None,
data_sets=None,
kind='real',
rtol=rtol,
atol=atol,
))
import_recursive(sys.modules[__name__])
return real_model_testcases + _SimpleModelTestCases
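# Usage sketch (assumptions: TestCase exposes its constructor fields as
# attributes, e.g. as a namedtuple, and the sibling modules' export() hooks
# have populated _SimpleModelTestCases). Only runs when executed directly:
if __name__ == '__main__':
    for case in collect_testcases():
        print(case.kind, case.name)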
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class SingleRelu(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Relu', ['x'], ['y'], name='test')
graph = onnx.helper.make_graph(
nodes=[node],
name='SingleRelu',
inputs=[onnx.helper.make_tensor_value_info(
'x', onnx.TensorProto.FLOAT, [1, 2])],
outputs=[onnx.helper.make_tensor_value_info(
'y', onnx.TensorProto.FLOAT, [1, 2])])
model = onnx.helper.make_model(graph, producer_name='backend-test')
x = np.random.randn(1, 2).astype(np.float32)
y = np.maximum(x, 0)
expect(model, inputs=[x], outputs=[y],
name='test_single_relu_model')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class SingleSign(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Sign', ['x'], ['y'], name='test')
x = np.array([-1.0, 4.5, -4.5, 3.1, 0.0, 2.4, -5.5]).astype(np.float32)
y = np.array([-1.0, 1.0, -1.0, 1.0, 0.0, 1.0, -1.0]).astype(np.float32)
graph = onnx.helper.make_graph(
nodes=[node],
name='SingleSign',
inputs=[onnx.helper.make_tensor_value_info('x',
onnx.TensorProto.FLOAT,
[7])],
outputs=[onnx.helper.make_tensor_value_info('y',
onnx.TensorProto.FLOAT,
[7])])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y],
name='test_sign_model')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
from typing import Sequence
class ExpandDynamicShape(Base):
@staticmethod
def export(): # type: () -> None
def make_graph(node, input_shape, shape_shape, output_shape): # type: (onnx.helper.NodeProto, Sequence[int], Sequence[int], Sequence[int]) -> onnx.helper.GraphProto
graph = onnx.helper.make_graph(
nodes=[node],
name='Expand',
inputs=[onnx.helper.make_tensor_value_info('X',
onnx.TensorProto.FLOAT,
input_shape),
onnx.helper.make_tensor_value_info('shape',
onnx.TensorProto.INT64,
shape_shape)],
outputs=[onnx.helper.make_tensor_value_info('Y',
onnx.TensorProto.FLOAT,
output_shape)])
return graph
node = onnx.helper.make_node(
'Expand', ['X', 'shape'], ['Y'], name='test')
input_shape = [1, 3, 1]
x = np.ones(input_shape, dtype=np.float32)
# 1st testcase
shape = np.array([3, 1], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model1")
# 2nd testcase
shape = np.array([1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model2")
# 3rd testcase
shape = np.array([3, 1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model3")
# 4th testcase
shape = np.array([3, 3, 1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model4")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class GlobalMaxPool(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'GlobalMaxPool',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(1, 3, 5, 5).astype(np.float32)
# GlobalMaxPool reduces over every spatial axis (axes 2..N-1) and keeps
# them as size-1 dimensions.
spatial_dims = np.ndim(x) - 2
y = np.max(x, axis=tuple(range(2, 2 + spatial_dims)), keepdims=True)
expect(node, inputs=[x], outputs=[y], name='test_globalmaxpool')
@staticmethod
def export_globalmaxpool_precomputed(): # type: () -> None
node = onnx.helper.make_node(
'GlobalMaxPool',
inputs=['x'],
outputs=['y'],
)
x = np.array([[[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]]]).astype(np.float32)
y = np.array([[[[9]]]]).astype(np.float32)
expect(node, inputs=[x], outputs=[y], name='test_globalmaxpool_precomputed')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Tile(Base):
@staticmethod
def export_tile(): # type: () -> None
node = onnx.helper.make_node(
'Tile',
inputs=['x', 'y'],
outputs=['z']
)
x = np.random.rand(2, 3, 4, 5).astype(np.float32)
repeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)
z = np.tile(x, repeats)
expect(node,
inputs=[x, repeats],
outputs=[z],
name='test_tile')
@staticmethod
def export_tile_precomputed(): # type: () -> None
node = onnx.helper.make_node(
'Tile',
inputs=['x', 'y'],
outputs=['z']
)
x = np.array([
[0, 1],
[2, 3]
], dtype=np.float32)
repeats = np.array([2, 2], dtype=np.int64)
z = np.array([
[0, 1, 0, 1],
[2, 3, 2, 3],
[0, 1, 0, 1],
[2, 3, 2, 3]
], dtype=np.float32)
expect(node,
inputs=[x, repeats],
outputs=[z],
name='test_tile_precomputed')
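# Shape sanity check: np.tile repeats per axis, so the output shape is the
# elementwise product of the input shape and `repeats` (illustrative only):
if __name__ == '__main__':
    x = np.zeros((2, 3, 4, 5), dtype=np.float32)
    repeats = np.array([2, 1, 3, 1], dtype=np.int64)
    assert np.tile(x, repeats).shape == (4, 3, 12, 5)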
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Hardmax(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Hardmax',
inputs=['x'],
outputs=['y'],
)
x = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(np.float32)
y = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]).astype(np.float32)
expect(node, inputs=[x], outputs=[y],
name='test_hardmax_example')
# For multiple occurrences of the maximal values, the first occurrence is selected for one-hot output
x = np.array([[3, 3, 3, 1]]).astype(np.float32)
y = np.array([[1, 0, 0, 0]]).astype(np.float32)
expect(node, inputs=[x], outputs=[y],
name='test_hardmax_one_hot')
@staticmethod
def export_hardmax_axis(): # type: () -> None
def hardmax_2d(x): # type: (np.ndarray) -> np.ndarray
return np.eye(x.shape[1], dtype=x.dtype)[np.argmax(x, axis=1)]
x = np.random.randn(3, 4, 5).astype(np.float32)
node = onnx.helper.make_node(
'Hardmax',
inputs=['x'],
outputs=['y'],
axis=0,
)
y = hardmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_hardmax_axis_0')
node = onnx.helper.make_node(
'Hardmax',
inputs=['x'],
outputs=['y'],
axis=1,
)
y = hardmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_hardmax_axis_1')
# default axis is 1
node = onnx.helper.make_node(
'Hardmax',
inputs=['x'],
outputs=['y'],
)
expect(node, inputs=[x], outputs=[y],
name='test_hardmax_default_axis')
node = onnx.helper.make_node(
'Hardmax',
inputs=['x'],
outputs=['y'],
axis=2,
)
y = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_hardmax_axis_2')
node = onnx.helper.make_node(
'Hardmax',
inputs=['x'],
outputs=['y'],
axis=-1,
)
y = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_hardmax_negative_axis')
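# Equivalence sketch: for 2-D inputs, Hardmax along axis 1 is a one-hot
# encoding of argmax, which is what hardmax_2d computes above. Never run by
# the harness; only executes when the module is run directly.
if __name__ == '__main__':
    x = np.array([[3., 0., 1., 2.], [2., 5., 1., 0.]], dtype=np.float32)
    one_hot = np.eye(x.shape[1], dtype=x.dtype)[np.argmax(x, axis=1)]
    assert np.array_equal(one_hot, np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32))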
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
from ..utils import all_numeric_dtypes
class Max(Base):
@staticmethod
def export(): # type: () -> None
data_0 = np.array([3, 2, 1]).astype(np.float32)
data_1 = np.array([1, 4, 4]).astype(np.float32)
data_2 = np.array([2, 5, 3]).astype(np.float32)
result = np.array([3, 5, 4]).astype(np.float32)
node = onnx.helper.make_node(
'Max',
inputs=['data_0', 'data_1', 'data_2'],
outputs=['result'],
)
expect(node, inputs=[data_0, data_1, data_2], outputs=[result],
name='test_max_example')
node = onnx.helper.make_node(
'Max',
inputs=['data_0'],
outputs=['result'],
)
expect(node, inputs=[data_0], outputs=[data_0],
name='test_max_one_input')
result = np.maximum(data_0, data_1)
node = onnx.helper.make_node(
'Max',
inputs=['data_0', 'data_1'],
outputs=['result'],
)
expect(node, inputs=[data_0, data_1], outputs=[result],
name='test_max_two_inputs')
@staticmethod
def export_max_all_numeric_types(): # type: () -> None
for op_dtype in all_numeric_dtypes:
data_0 = np.array([3, 2, 1]).astype(op_dtype)
data_1 = np.array([1, 4, 4]).astype(op_dtype)
result = np.array([3, 4, 4]).astype(op_dtype)
node = onnx.helper.make_node(
'Max',
inputs=['data_0', 'data_1'],
outputs=['result'],
)
expect(node, inputs=[data_0, data_1], outputs=[result],
name='test_max_{0}'.format(np.dtype(op_dtype).name))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class ReduceSum(Base):
@staticmethod
def export_do_not_keepdims(): # type: () -> None
shape = [3, 2, 2]
axes = [1]
keepdims = 0
node = onnx.helper.make_node(
'ReduceSum',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
# print(reduced)
#[[4., 6.]
# [12., 14.]
# [20., 22.]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_do_not_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_do_not_keepdims_random')
@staticmethod
def export_keepdims(): # type: () -> None
shape = [3, 2, 2]
axes = [1]
keepdims = 1
node = onnx.helper.make_node(
'ReduceSum',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
# print(reduced)
#[[[4., 6.]]
# [[12., 14.]]
# [[20., 22.]]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_keepdims_random')
@staticmethod
def export_default_axes_keepdims(): # type: () -> None
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
'ReduceSum',
inputs=['data'],
outputs=['reduced'],
keepdims=keepdims)
data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)
reduced = np.sum(data, axis=axes, keepdims=keepdims == 1)
# print(reduced)
#[[[78.]]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_default_axes_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sum(data, axis=axes, keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_default_axes_keepdims_random')
@staticmethod
def export_negative_axes_keepdims(): # type: () -> None
shape = [3, 2, 2]
axes = [-2]
keepdims = 1
node = onnx.helper.make_node(
'ReduceSum',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
# print(reduced)
#[[[4., 6.]]
# [[12., 14.]]
# [[20., 22.]]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_negative_axes_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_negative_axes_keepdims_random')
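# Keepdims sketch: with keepdims=1 the reduced axes survive as size-1 dims, so
# downstream shapes stay broadcast-compatible with the input (illustrative only):
if __name__ == '__main__':
    data = np.ones((3, 2, 2), dtype=np.float32)
    assert np.sum(data, axis=(1,), keepdims=True).shape == (3, 1, 2)
    assert np.sum(data, axis=(1,), keepdims=False).shape == (3, 2)
    assert np.sum(data, axis=None, keepdims=True).shape == (1, 1, 1)  # default axes: reduce all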
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class MatMul(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'MatMul',
inputs=['a', 'b'],
outputs=['c'],
)
# 2d
a = np.random.randn(3, 4).astype(np.float32)
b = np.random.randn(4, 3).astype(np.float32)
c = np.matmul(a, b)
expect(node, inputs=[a, b], outputs=[c],
name='test_matmul_2d')
# 3d
a = np.random.randn(2, 3, 4).astype(np.float32)
b = np.random.randn(2, 4, 3).astype(np.float32)
c = np.matmul(a, b)
expect(node, inputs=[a, b], outputs=[c],
name='test_matmul_3d')
# 4d
a = np.random.randn(1, 2, 3, 4).astype(np.float32)
b = np.random.randn(1, 2, 4, 3).astype(np.float32)
c = np.matmul(a, b)
expect(node, inputs=[a, b], outputs=[c],
name='test_matmul_4d')
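# Broadcasting sketch: np.matmul (like ONNX MatMul) treats inputs with more
# than two dims as stacks of matrices and multiplies the trailing 2-D blocks:
if __name__ == '__main__':
    a = np.zeros((2, 3, 4), dtype=np.float32)
    b = np.zeros((2, 4, 3), dtype=np.float32)
    assert np.matmul(a, b).shape == (2, 3, 3)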
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Gather(Base):
@staticmethod
def export_gather_0(): # type: () -> None
node = onnx.helper.make_node(
'Gather',
inputs=['data', 'indices'],
outputs=['y'],
axis=0,
)
data = np.random.randn(5, 4, 3, 2).astype(np.float32)
indices = np.array([0, 1, 3])
y = np.take(data, indices, axis=0)
expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],
name='test_gather_0')
@staticmethod
def export_gather_1(): # type: () -> None
node = onnx.helper.make_node(
'Gather',
inputs=['data', 'indices'],
outputs=['y'],
axis=1,
)
data = np.random.randn(5, 4, 3, 2).astype(np.float32)
indices = np.array([0, 1, 3])
y = np.take(data, indices, axis=1)
expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],
name='test_gather_1')
@staticmethod
def export_gather_negative_indices(): # type: () -> None
node = onnx.helper.make_node(
'Gather',
inputs=['data', 'indices'],
outputs=['y'],
axis=0,
)
data = np.arange(10).astype(np.float32)
indices = np.array([0, -9, -10])
y = np.take(data, indices, axis=0)
expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],
name='test_gather_negative_indices')
# print(y)
# [0. 1. 0.]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class MeanVarianceNormalization(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'MeanVarianceNormalization',
inputs=['X'],
outputs=['Y']
)
input_data = np.array([[[[0.8439683], [0.5665144], [0.05836735]],
[[0.02916367], [0.12964272], [0.5060197]],
[[0.79538304], [0.9411346], [0.9546573]]],
[[[0.17730942], [0.46192095], [0.26480448]],
[[0.6746842], [0.01665257], [0.62473077]],
[[0.9240844], [0.9722341], [0.11965699]]],
[[[0.41356155], [0.9129373], [0.59330076]],
[[0.81929934], [0.7862604], [0.11799799]],
[[0.69248444], [0.54119414], [0.07513223]]]], dtype=np.float32)
# Calculate expected output data
data_mean = np.mean(input_data, axis=(0, 2, 3), keepdims=1)
data_mean_squared = np.power(data_mean, 2)
data_squared = np.power(input_data, 2)
data_squared_mean = np.mean(data_squared, axis=(0, 2, 3), keepdims=1)
std = np.sqrt(data_squared_mean - data_mean_squared)
expected_output = (input_data - data_mean) / (std + 1e-9)
expect(node, inputs=[input_data], outputs=[expected_output],
name='test_mvn')
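# The std computed above uses the identity Var[X] = E[X^2] - E[X]^2; a quick
# illustrative check against np.std over the same axes (not run by the harness):
if __name__ == '__main__':
    data = np.random.rand(3, 3, 3, 1).astype(np.float32)
    mean = np.mean(data, axis=(0, 2, 3), keepdims=1)
    std = np.sqrt(np.mean(np.power(data, 2), axis=(0, 2, 3), keepdims=1) - np.power(mean, 2))
    assert np.allclose(std, np.std(data, axis=(0, 2, 3), keepdims=1), atol=1e-4)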
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Asinh(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Asinh',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.arcsinh(x) # expected output [-0.88137358, 0., 0.88137358]
expect(node, inputs=[x], outputs=[y],
name='test_asinh_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.arcsinh(x)
expect(node, inputs=[x], outputs=[y],
name='test_asinh')
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from onnx import TensorProto
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..base import Base
from . import expect
class Cast(Base):
@staticmethod
def export(): # type: () -> None
shape = (3, 4)
test_cases = [
('FLOAT', 'FLOAT16'),
('FLOAT', 'DOUBLE'),
('FLOAT16', 'FLOAT'),
('FLOAT16', 'DOUBLE'),
('DOUBLE', 'FLOAT'),
('DOUBLE', 'FLOAT16'),
('FLOAT', 'STRING'),
('STRING', 'FLOAT'),
]
for from_type, to_type in test_cases:
if from_type != 'STRING':
input = np.random.random_sample(shape).astype(
TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, from_type)])
if to_type == 'STRING':
# Convert the input to str, then give it np.object dtype so the test-data generator can serialize it
ss = []
for i in input.flatten():
s = str(i).encode('utf-8')
su = s.decode('utf-8')
ss.append(su)
output = np.array(ss).astype(np.object).reshape([3, 4])
else:
output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
else:
input = np.array([u'0.47892547', u'0.48033667', u'0.49968487', u'0.81910545',
u'0.47031248', u'0.816468', u'0.21087195', u'0.7229038',
u'NaN', u'INF', u'+INF', u'-INF'], dtype=np.dtype(np.object)).reshape([3, 4])
output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
node = onnx.helper.make_node(
'Cast',
inputs=['input'],
outputs=['output'],
to=getattr(TensorProto, to_type),
)
expect(node, inputs=[input], outputs=[output],
name='test_cast_' + from_type + '_to_' + to_type)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Det(Base):
@staticmethod
def export_2d(): # type: () -> None
node = onnx.helper.make_node(
'Det',
inputs=['x'],
outputs=['y'],
)
x = np.arange(4).reshape(2, 2).astype(np.float32)
y = np.linalg.det(x) # expect -2
expect(node, inputs=[x], outputs=[y],
name='test_det_2d')
@staticmethod
def export_nd(): # type: () -> None
node = onnx.helper.make_node(
'Det',
inputs=['x'],
outputs=['y'],
)
x = np.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]).astype(np.float32)
y = np.linalg.det(x) # expect array([-2., -3., -8.])
expect(node, inputs=[x], outputs=[y],
name='test_det_nd')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class ConvTranspose(Base):
@staticmethod
def export(): # type: () -> None
x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)
[3., 4., 5.],
[6., 7., 8.]]]]).astype(np.float32)
W = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]]).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"])
y = np.array([[[[0., 1., 3., 3., 2.], # (1, 2, 5, 5)
[3., 8., 15., 12., 7.],
[9., 21., 36., 27., 15.],
[9., 20., 33., 24., 13.],
[6., 13., 21., 15., 8.]],
[[0., 1., 3., 3., 2.],
[3., 8., 15., 12., 7.],
[9., 21., 36., 27., 15.],
[9., 20., 33., 24., 13.],
[6., 13., 21., 15., 8.]]]]).astype(np.float32)
expect(node, inputs=[x, W], outputs=[y], name='test_convtranspose')
@staticmethod
def export_convtranspose_1d(): # type: () -> None
x = np.array([[[0., 1., 2.]]]).astype(np.float32) # (1, 1, 3)
W = np.array([[[1., 1., 1.], # (1, 2, 3)
[1., 1., 1.]]]).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"])
y = np.array([[[0., 1., 3., 3., 2.], # (1, 2, 5)
[0., 1., 3., 3., 2.]]]).astype(np.float32)
expect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_1d')
@staticmethod
def export_convtranspose_3d(): # type: () -> None
x = np.array([[[[[0., 1., 2., 3., 4.], # (1, 1, 3, 4, 5)
[5., 6., 7., 8., 9.],
[10., 11., 12., 13., 14.],
[15., 16., 17., 18., 19.]],
[[20., 21., 22., 23., 24.],
[25., 26., 27., 28., 29.],
[30., 31., 32., 33., 34.],
[35., 36., 37., 38., 39.]],
[[40., 41., 42., 43., 44.],
[45., 46., 47., 48., 49.],
[50., 51., 52., 53., 54.],
[55., 56., 57., 58., 59.]]]]]).astype(np.float32)
W = np.array([[[[[1., 1., 1.], # (1, 2, 3, 3, 3)
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]],
[[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]]]).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"])
y = np.array([[[[[0., 1., 3., 6., 9., 7., 4.], # (1, 2, 5, 6, 7)
[5., 12., 21., 27., 33., 24., 13.],
[15., 33., 54., 63., 72., 51., 27.],
[30., 63., 99., 108., 117., 81., 42.],
[25., 52., 81., 87., 93., 64., 33.],
[15., 31., 48., 51., 54., 37., 19.]],
[[20., 42., 66., 72., 78., 54., 28.],
[50., 104., 162., 174., 186., 128., 66.],
[90., 186., 288., 306., 324., 222., 114.],
[120., 246., 378., 396., 414., 282., 144.],
[90., 184., 282., 294., 306., 208., 106.],
[50., 102., 156., 162., 168., 114., 58.]],
[[60., 123., 189., 198., 207., 141., 72.],
[135., 276., 423., 441., 459., 312., 159.],
[225., 459., 702., 729., 756., 513., 261.],
[270., 549., 837., 864., 891., 603., 306.],
[195., 396., 603., 621., 639., 432., 219.],
[105., 213., 324., 333., 342., 231., 117.]],
[[60., 122., 186., 192., 198., 134., 68.],
[130., 264., 402., 414., 426., 288., 146.],
[210., 426., 648., 666., 684., 462., 234.],
[240., 486., 738., 756., 774., 522., 264.],
[170., 344., 522., 534., 546., 368., 186.],
[90., 182., 276., 282., 288., 194., 98.]],
[[40., 81., 123., 126., 129., 87., 44.],
[85., 172., 261., 267., 273., 184., 93.],
[135., 273., 414., 423., 432., 291., 147.],
[150., 303., 459., 468., 477., 321., 162.],
[105., 212., 321., 327., 333., 224., 113.],
[55., 111., 168., 171., 174., 117., 59.]]],
[[[0., 1., 3., 6., 9., 7., 4.],
[5., 12., 21., 27., 33., 24., 13.],
[15., 33., 54., 63., 72., 51., 27.],
[30., 63., 99., 108., 117., 81., 42.],
[25., 52., 81., 87., 93., 64., 33.],
[15., 31., 48., 51., 54., 37., 19.]],
[[20., 42., 66., 72., 78., 54., 28.],
[50., 104., 162., 174., 186., 128., 66.],
[90., 186., 288., 306., 324., 222., 114.],
[120., 246., 378., 396., 414., 282., 144.],
[90., 184., 282., 294., 306., 208., 106.],
[50., 102., 156., 162., 168., 114., 58.]],
[[60., 123., 189., 198., 207., 141., 72.],
[135., 276., 423., 441., 459., 312., 159.],
[225., 459., 702., 729., 756., 513., 261.],
[270., 549., 837., 864., 891., 603., 306.],
[195., 396., 603., 621., 639., 432., 219.],
[105., 213., 324., 333., 342., 231., 117.]],
[[60., 122., 186., 192., 198., 134., 68.],
[130., 264., 402., 414., 426., 288., 146.],
[210., 426., 648., 666., 684., 462., 234.],
[240., 486., 738., 756., 774., 522., 264.],
[170., 344., 522., 534., 546., 368., 186.],
[90., 182., 276., 282., 288., 194., 98.]],
[[40., 81., 123., 126., 129., 87., 44.],
[85., 172., 261., 267., 273., 184., 93.],
[135., 273., 414., 423., 432., 291., 147.],
[150., 303., 459., 468., 477., 321., 162.],
[105., 212., 321., 327., 333., 224., 113.],
[55., 111., 168., 171., 174., 117., 59.]]]]]).astype(np.float32)
expect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_3d')
@staticmethod
def export_convtranspose_attributes(): # type: () -> None
x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)
[3., 4., 5.],
[6., 7., 8.]]]]).astype(np.float32)
W = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]]).astype(np.float32)
y = np.array([[[[0., 0., 1., 1., 3., 2., 2., 0.], # (1, 2, 10, 8)
[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]],
[[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]]]]).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"],
strides=[3, 2],
output_shape=[10, 8])
expect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_output_shape')
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"],
strides=[3, 2],
output_padding=[1, 1])
expect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_pad')
node = onnx.helper.make_node(
'ConvTranspose', ['X', 'W'], ['Y'],
name='test',
strides=[3, 2],
output_shape=[10, 8],
kernel_shape=[3, 3],
output_padding=[1, 1]
)
expect(node, inputs=[x, W], outputs=[y],
name='test_convtranspose_kernel_shape')
@staticmethod
def export_convtranspose_pads(): # type: () -> None
x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)
[3., 4., 5.],
[6., 7., 8.]]]]).astype(np.float32)
W = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]]).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"],
strides=[3, 2],
pads=[1, 2, 1, 2])
y = np.array([[[[1., 1., 3.], # (1, 2, 7, 3)
[1., 1., 3.],
[7., 4., 9.],
[7., 4., 9.],
[7., 4., 9.],
[13., 7., 15.],
[13., 7., 15.]],
[[1., 1., 3.],
[1., 1., 3.],
[7., 4., 9.],
[7., 4., 9.],
[7., 4., 9.],
[13., 7., 15.],
[13., 7., 15.]]]]).astype(np.float32)
expect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_pads')
@staticmethod
def export_convtranspose_dilations(): # type: () -> None
x = np.array([[[[3., 8., 1.], # (1, 1, 3, 3)
[9., 5., 7.],
[3., 2., 6.]]]]).astype(np.float32)
W = np.array([[[[7., 2.], # (1, 1, 2, 2)
[1., 9.]]]]).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], dilations=[2, 2])
y = np.array([[[[21., 56., 13., 16., 2.], # [1, 1, 5, 5]
[63., 35., 67., 10., 14.],
[24., 22., 76., 76., 21.],
[9., 5., 88., 45., 63.],
[3., 2., 33., 18., 54.]]]]).astype(np.float32)
expect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_dilations')
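# Output-shape sketch, per the ONNX ConvTranspose shape rule:
#   out = stride * (in - 1) + ((kernel - 1) * dilation + 1)
#         - pad_begin - pad_end + output_padding
# The helper below assumes symmetric padding (pad per side) and is checked
# against the test data above; it only runs when the module is run directly.
if __name__ == '__main__':
    def convtranspose_dim(in_, kernel, stride=1, pad=0, dilation=1, out_pad=0):
        return stride * (in_ - 1) + ((kernel - 1) * dilation + 1) - 2 * pad + out_pad
    # Height of the pads test: 3 -> 7 with kernel 3, stride 3, pads 1/1.
    assert convtranspose_dim(3, 3, stride=3, pad=1) == 7
    # Width of the same test: 3 -> 3 with kernel 3, stride 2, pads 2/2.
    assert convtranspose_dim(3, 3, stride=2, pad=2) == 3
    # The dilations test: 3 -> 5 with kernel 2, dilation 2.
    assert convtranspose_dim(3, 2, dilation=2) == 5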
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
def pow(x, y): # type: ignore
z = np.power(x, y).astype(x.dtype)
return z
class Pow(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Pow',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.float32)
z = pow(x, y) # expected output [1., 32., 729.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_example')
x = np.arange(60).reshape(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = pow(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_pow')
@staticmethod
def export_pow_broadcast(): # type: () -> None
node = onnx.helper.make_node(
'Pow',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array(2).astype(np.float32)
z = pow(x, y) # expected output [1., 4., 9.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_bcast_scalar')
node = onnx.helper.make_node(
'Pow',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
y = np.array([1, 2, 3]).astype(np.float32)
# expected output [[1, 4, 27], [4, 25, 216]]
z = pow(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_bcast_array')
@staticmethod
def export_types(): # type: () -> None
node = onnx.helper.make_node(
'Pow',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.int64)
z = pow(x, y) # expected output [1., 32., 729.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_float32_int64')
x = np.array([1, 2, 3]).astype(np.int64)
y = np.array([4, 5, 6]).astype(np.float32)
z = pow(x, y) # expected output [1, 32, 729]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_int64_float32')
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.int32)
z = pow(x, y) # expected output [1., 32., 729.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_float32_int32')
x = np.array([1, 2, 3]).astype(np.int32)
y = np.array([4, 5, 6]).astype(np.float32)
z = pow(x, y) # expected output [1, 32, 729]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_int32_float32')
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.uint64)
z = pow(x, y) # expected output [1., 32., 729.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_float32_uint64')
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.uint32)
z = pow(x, y) # expected output [1., 32., 729.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_float32_uint32')
x = np.array([1, 2, 3]).astype(np.int64)
y = np.array([4, 5, 6]).astype(np.int64)
z = pow(x, y) # expected output [1, 32, 729]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_int64_int64')
x = np.array([1, 2, 3]).astype(np.int32)
y = np.array([4, 5, 6]).astype(np.int32)
z = pow(x, y) # expected output [1, 32, 729]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_types_int32_int32')
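# Note on the pow() helper above: the result is cast back to x's dtype, so an
# integer base yields an integer output even for a float exponent (sketch only):
if __name__ == '__main__':
    x = np.array([2], dtype=np.int64)
    y = np.array([0.5], dtype=np.float32)
    assert pow(x, y).dtype == np.int64
    assert pow(x, y)[0] == 1  # np.power gives 1.414..., the cast truncates to 1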
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Acosh(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Acosh',
inputs=['x'],
outputs=['y'],
)
x = np.array([10, np.e, 1]).astype(np.float32)
y = np.arccosh(x) # expected output [2.99322295, 1.65745449, 0.]
expect(node, inputs=[x], outputs=[y],
name='test_acosh_example')
x = np.random.uniform(1.0, 10.0, (3, 4, 5)).astype(np.float32)
y = np.arccosh(x)
expect(node, inputs=[x], outputs=[y],
name='test_acosh')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class InstanceNormalization(Base):
@staticmethod
def export(): # type: () -> None
def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore
dims_x = len(x.shape)
axis = tuple(range(2, dims_x))
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
return s * (x - mean) / np.sqrt(var + epsilon) + bias
# input size: (1, 2, 1, 3)
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
s = np.array([1.0, 1.5]).astype(np.float32)
bias = np.array([0, 1]).astype(np.float32)
y = _instancenorm_test_mode(x, s, bias).astype(np.float32)
node = onnx.helper.make_node(
'InstanceNormalization',
inputs=['x', 's', 'bias'],
outputs=['y'],
)
# output size: (1, 2, 1, 3)
expect(node, inputs=[x, s, bias], outputs=[y],
name='test_instancenorm_example')
# input size: (2, 3, 4, 5)
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
epsilon = 1e-2
y = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)
node = onnx.helper.make_node(
'InstanceNormalization',
inputs=['x', 's', 'bias'],
outputs=['y'],
epsilon=epsilon,
)
# output size: (2, 3, 4, 5)
expect(node, inputs=[x, s, bias], outputs=[y],
name='test_instancenorm_epsilon')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class ThresholdedRelu(Base):
@staticmethod
def export(): # type: () -> None
alpha = 2.0
node = onnx.helper.make_node(
'ThresholdedRelu',
inputs=['x'],
outputs=['y'],
alpha=alpha
)
x = np.array([-1.5, 0., 1.2, 2.0, 2.2]).astype(np.float32)
y = np.clip(x, alpha, np.inf)
y[y == alpha] = 0  # expected output [0., 0., 0., 0., 2.2]
expect(node, inputs=[x], outputs=[y],
name='test_thresholdedrelu_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, alpha, np.inf)
y[y == alpha] = 0
expect(node, inputs=[x], outputs=[y],
name='test_thresholdedrelu')
@staticmethod
def export_default(): # type: () -> None
default_alpha = 1.0
node = onnx.helper.make_node(
'ThresholdedRelu',
inputs=['x'],
outputs=['y']
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, default_alpha, np.inf)
y[y == default_alpha] = 0
expect(node, inputs=[x], outputs=[y],
name='test_thresholdedrelu_default')
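# Equivalence sketch: the clip-then-zero trick above matches the direct
# definition y = x if x > alpha else 0 (illustrative only):
if __name__ == '__main__':
    alpha = 2.0
    x = np.array([-1.5, 0., 1.2, 2.0, 2.2], dtype=np.float32)
    y = np.clip(x, alpha, np.inf)
    y[y == alpha] = 0
    assert np.array_equal(y, np.where(x > alpha, x, 0))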
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Neg(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Neg',
inputs=['x'],
outputs=['y'],
)
x = np.array([-4, 2]).astype(np.float32)
y = np.negative(x)  # expected output [4., -2.]
expect(node, inputs=[x], outputs=[y],
name='test_neg_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.negative(x)
expect(node, inputs=[x], outputs=[y],
name='test_neg')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Tanh(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Tanh',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.tanh(x) # expected output [-0.76159418, 0., 0.76159418]
expect(node, inputs=[x], outputs=[y],
name='test_tanh_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.tanh(x)
expect(node, inputs=[x], outputs=[y],
name='test_tanh')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class IsInf(Base):
@staticmethod
def export_infinity(): # type: () -> None
node = onnx.helper.make_node('IsInf',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf],
dtype=np.float32)
y = np.isinf(x)
expect(node, inputs=[x], outputs=[y], name='test_isinf')
@staticmethod
def export_positive_infinity_only(): # type: () -> None
node = onnx.helper.make_node('IsInf',
inputs=['x'],
outputs=['y'],
detect_negative=0
)
x = np.array([-1.7, np.nan, np.inf, 3.6, np.NINF, np.inf],
dtype=np.float32)
y = np.isposinf(x)
expect(node, inputs=[x], outputs=[y], name='test_isinf_positive')
@staticmethod
def export_negative_infinity_only(): # type: () -> None
node = onnx.helper.make_node('IsInf',
inputs=['x'],
outputs=['y'],
detect_positive=0
)
x = np.array([-1.7, np.nan, np.inf, -3.6, np.NINF, np.inf],
dtype=np.float32)
y = np.isneginf(x)
expect(node, inputs=[x], outputs=[y], name='test_isinf_negative')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Flatten(Base):
@staticmethod
def export(): # type: () -> None
shape = (2, 3, 4, 5)
a = np.random.random_sample(shape).astype(np.float32)
for i in range(len(shape)):
node = onnx.helper.make_node(
'Flatten',
inputs=['a'],
outputs=['b'],
axis=i,
)
new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)
b = np.reshape(a, new_shape)
expect(node, inputs=[a], outputs=[b],
name='test_flatten_axis' + str(i))
@staticmethod
def export_flatten_with_default_axis(): # type: () -> None
node = onnx.helper.make_node(
'Flatten',
inputs=['a'],
outputs=['b'], # Default value for axis: axis=1
)
shape = (5, 4, 3, 2)
a = np.random.random_sample(shape).astype(np.float32)
new_shape = (5, 24)
b = np.reshape(a, new_shape)
expect(node, inputs=[a], outputs=[b],
name='test_flatten_default_axis')
@staticmethod
def export_flatten_negative_axis(): # type: () -> None
shape = (2, 3, 4, 5)
a = np.random.random_sample(shape).astype(np.float32)
for i in range(-len(shape), 0):
node = onnx.helper.make_node(
'Flatten',
inputs=['a'],
outputs=['b'],
axis=i,
)
new_shape = (np.prod(shape[0:i]).astype(int), -1)
b = np.reshape(a, new_shape)
expect(node, inputs=[a], outputs=[b],
name='test_flatten_negative_axis' + str(abs(i)))
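# Shape sketch: Flatten with axis=i reshapes to (prod(shape[:i]), prod(shape[i:])),
# with axis=0 producing a single row (illustrative only):
if __name__ == '__main__':
    shape = (2, 3, 4, 5)
    a = np.zeros(shape, dtype=np.float32)
    assert np.reshape(a, (np.prod(shape[:2]).astype(int), -1)).shape == (6, 20)
    assert np.reshape(a, (1, -1)).shape == (1, 120)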
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Equal(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Equal',
inputs=['x', 'y'],
outputs=['z'],
)
x = (np.random.randn(3, 4, 5) * 10).astype(np.int32)
y = (np.random.randn(3, 4, 5) * 10).astype(np.int32)
z = np.equal(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_equal')
@staticmethod
def export_equal_broadcast(): # type: () -> None
node = onnx.helper.make_node(
'Equal',
inputs=['x', 'y'],
outputs=['z'],
)
x = (np.random.randn(3, 4, 5) * 10).astype(np.int32)
y = (np.random.randn(5) * 10).astype(np.int32)
z = np.equal(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_equal_bcast')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Softsign(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Softsign',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.array([-0.5, 0, 0.5]).astype(np.float32)
expect(node, inputs=[x], outputs=[y],
name='test_softsign_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = x / (1 + np.abs(x))
expect(node, inputs=[x], outputs=[y],
name='test_softsign')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Shrink(Base):
@staticmethod
def export_hard_shrink(): # type: () -> None
node = onnx.helper.make_node(
'Shrink',
inputs=['x'],
outputs=['y'],
lambd=1.5,
)
X = np.arange(-2.0, 2.1, dtype=np.float32)
Y = np.array([-2, 0, 0, 0, 2], dtype=np.float32)
expect(node, inputs=[X], outputs=[Y],
name='test_shrink_hard')
@staticmethod
def export_soft_shrink(): # type: () -> None
node = onnx.helper.make_node(
'Shrink',
inputs=['x'],
outputs=['y'],
lambd=1.5,
bias=1.5,
)
X = np.arange(-2.0, 2.1, dtype=np.float32)
Y = np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32)
expect(node, inputs=[X], outputs=[Y],
name='test_shrink_soft')
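# Piecewise definition sketch: Shrink(x) = x + bias if x < -lambd,
# x - bias if x > lambd, else 0. This reproduces both expected outputs above
# (hard shrink uses the default bias of 0). Illustrative only:
if __name__ == '__main__':
    def shrink(x, lambd, bias):
        return np.where(x < -lambd, x + bias, np.where(x > lambd, x - bias, 0)).astype(x.dtype)
    X = np.arange(-2.0, 2.1, dtype=np.float32)
    assert np.array_equal(shrink(X, 1.5, 0.0), np.array([-2, 0, 0, 0, 2], dtype=np.float32))
    assert np.array_equal(shrink(X, 1.5, 1.5), np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32))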
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Add(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Add',
inputs=['x', 'y'],
outputs=['sum'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
expect(node, inputs=[x, y], outputs=[x + y],
name='test_add')
@staticmethod
def export_add_broadcast(): # type: () -> None
node = onnx.helper.make_node(
'Add',
inputs=['x', 'y'],
outputs=['sum'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(5).astype(np.float32)
expect(node, inputs=[x, y], outputs=[x + y],
name='test_add_bcast')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class NonZero(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'NonZero',
inputs=['condition'],
outputs=['result'],
)
condition = np.array([[1, 0], [1, 1]], dtype=np.bool)
result = np.array(np.nonzero(condition))  # expected output [[0, 1, 1], [0, 0, 1]]
expect(node, inputs=[condition], outputs=[result],
name='test_nonzero_example')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class BatchNormalization(Base):
@staticmethod
def export(): # type: () -> None
def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore
dims_x = len(x.shape)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
mean = mean.reshape(-1, *dim_ones)
var = var.reshape(-1, *dim_ones)
return s * (x - mean) / np.sqrt(var + epsilon) + bias
# input size: (1, 2, 1, 3)
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
s = np.array([1.0, 1.5]).astype(np.float32)
bias = np.array([0, 1]).astype(np.float32)
mean = np.array([0, 3]).astype(np.float32)
var = np.array([1, 1.5]).astype(np.float32)
y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 's', 'bias', 'mean', 'var'],
outputs=['y'],
)
# output size: (1, 2, 1, 3)
expect(node, inputs=[x, s, bias, mean, var], outputs=[y],
name='test_batchnorm_example')
# input size: (2, 3, 4, 5)
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
mean = np.random.randn(3).astype(np.float32)
var = np.random.rand(3).astype(np.float32)
epsilon = 1e-2
y = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 's', 'bias', 'mean', 'var'],
outputs=['y'],
epsilon=epsilon,
)
# output size: (2, 3, 4, 5)
expect(node, inputs=[x, s, bias, mean, var], outputs=[y],
name='test_batchnorm_epsilon')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class IsNaN(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'IsNaN',
inputs=['x'],
outputs=['y'],
)
x = np.array([3.0, np.nan, 4.0, np.nan], dtype=np.float32)
y = np.isnan(x)
expect(node, inputs=[x], outputs=[y], name='test_isnan')
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class StringNormalizer(Base):
@staticmethod
def export_nostopwords_nochangecase(): # type: () -> None
input = np.array([u'monday', u'tuesday']).astype(np.object)
output = input
# No stopwords. This is a NOOP
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
is_case_sensitive=1,
)
expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_nostopwords_nochangecase')
@staticmethod
def export_monday_casesensintive_nochangecase(): # type: () -> None
input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
output = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
is_case_sensitive=1,
stopwords=stopwords
)
expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_casesensintive_nochangecase')
@staticmethod
def export_monday_casesensintive_lower(): # type: () -> None
input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
output = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='LOWER',
is_case_sensitive=1,
stopwords=stopwords
)
expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_casesensintive_lower')
@staticmethod
def export_monday_casesensintive_upper(): # type: () -> None
input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
output = np.array([u'TUESDAY', u'WEDNESDAY', u'THURSDAY']).astype(np.object)
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
is_case_sensitive=1,
stopwords=stopwords
)
expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_casesensintive_upper')
@staticmethod
def export_monday_empty_output(): # type: () -> None
input = np.array([u'monday', u'monday']).astype(np.object)
output = np.array([u'']).astype(np.object)
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
is_case_sensitive=1,
stopwords=stopwords
)
expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_empty_output')
@staticmethod
def export_monday_insensitive_upper_twodim(): # type: () -> None
input = np.array([u'Monday', u'tuesday', u'wednesday', u'Monday', u'tuesday', u'wednesday']).astype(object).reshape([1, 6])
# Upper-casing handles c-cedilla, accented E and the German umlaut,
# but fails with the German eszett.
output = np.array([u'TUESDAY', u'WEDNESDAY', u'TUESDAY', u'WEDNESDAY']).astype(object).reshape([1, 4])
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
stopwords=stopwords
)
expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_insensitive_upper_twodim')
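# A rough pure-Python sketch of the StringNormalizer behaviour exercised
# above (stopword filtering, then optional case folding); the helper name
# and flat-list handling are illustrative, not part of the ONNX API:
def _strnormalizer_reference(strings, stopwords=(), case_change_action='NONE',
                             is_case_sensitive=0):  # type: ignore
    if is_case_sensitive:
        kept = [s for s in strings if s not in stopwords]
    else:
        lowered = set(w.lower() for w in stopwords)
        kept = [s for s in strings if s.lower() not in lowered]
    if case_change_action == 'LOWER':
        kept = [s.lower() for s in kept]
    elif case_change_action == 'UPPER':
        kept = [s.upper() for s in kept]
    # An input reduced to nothing yields a single empty string, as in the
    # empty-output case above.
    return np.array(kept if kept else [u'']).astype(object)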
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Slice(Base):
@staticmethod
def export_slice(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
y = x[0:3, 0:10]
starts = np.array([0, 0], dtype=np.int64)
ends = np.array([3, 10], dtype=np.int64)
axes = np.array([0, 1], dtype=np.int64)
steps = np.array([1, 1], dtype=np.int64)
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice')
@staticmethod
def export_slice_neg(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0], dtype=np.int64)
ends = np.array([-1], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 0:-1]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_neg')
@staticmethod
def export_slice_start_out_of_bounds(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([1000], dtype=np.int64)
ends = np.array([1000], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 1000:1000]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_start_out_of_bounds')
@staticmethod
def export_slice_end_out_of_bounds(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([1], dtype=np.int64)
ends = np.array([1000], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 1:1000]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_end_out_of_bounds')
@staticmethod
def export_slice_default_axes(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends], outputs=[y],
name='test_slice_default_axes')
@staticmethod
def export_slice_default_steps(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
axes = np.array([0, 1, 2], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends, axes], outputs=[y],
name='test_slice_default_steps')
@staticmethod
def export_slice_neg_steps(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([20, 10, 4], dtype=np.int64)
ends = np.array([0, 0, 1], dtype=np.int64)
axes = np.array([0, 1, 2], dtype=np.int64)
steps = np.array([-1, -3, -2], dtype=np.int64)
y = x[20:0:-1, 10:0:-3, 4:1:-2]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_neg_steps')
@staticmethod
def export_slice_negative_axes(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
axes = np.array([0, -2, -1], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends, axes], outputs=[y],
name='test_slice_negative_axes')
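# A rough NumPy sketch of the slicing exercised above. For these cases the
# op reduces to building one Python slice per axis, since NumPy already
# clamps out-of-bounds starts/ends and wraps negative indices; the full
# spec has extra clamping rules for negative steps that these tests do not
# need (`_slice_reference` is an illustrative name):
def _slice_reference(x, starts, ends, axes=None, steps=None):  # type: ignore
    if axes is None:
        axes = list(range(len(starts)))
    if steps is None:
        steps = [1] * len(starts)
    slices = [slice(None)] * x.ndim
    for start, end, axis, step in zip(starts, ends, axes, steps):
        # Negative axes index the slice list from the end, matching the spec.
        slices[int(axis)] = slice(int(start), int(end), int(step))
    return x[tuple(slices)]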
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Sqrt(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Sqrt',
inputs=['x'],
outputs=['y'],
)
x = np.array([1, 4, 9]).astype(np.float32)
y = np.sqrt(x) # expected output [1., 2., 3.]
expect(node, inputs=[x], outputs=[y],
name='test_sqrt_example')
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = np.sqrt(x)
expect(node, inputs=[x], outputs=[y],
name='test_sqrt')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class LessOrEqual(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'LessOrEqual',
inputs=['x', 'y'],
outputs=['less_equal'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = np.less_equal(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_less_equal')
@staticmethod
def export_less_equal_broadcast(): # type: () -> None
node = onnx.helper.make_node(
'LessOrEqual',
inputs=['x', 'y'],
outputs=['less_equal'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(5).astype(np.float32)
z = np.less_equal(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_less_equal_bcast')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Div(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Div',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([3, 4]).astype(np.float32)
y = np.array([1, 2]).astype(np.float32)
z = x / y # expected output [3., 2.]
expect(node, inputs=[x, y], outputs=[z],
name='test_div_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.rand(3, 4, 5).astype(np.float32) + 1.0
z = x / y
expect(node, inputs=[x, y], outputs=[z],
name='test_div')
@staticmethod
def export_div_broadcast(): # type: () -> None
node = onnx.helper.make_node(
'Div',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.rand(5).astype(np.float32) + 1.0
z = x / y
expect(node, inputs=[x, y], outputs=[z],
name='test_div_bcast')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class ReduceLogSum(Base):
@staticmethod
def export_nokeepdims(): # type: () -> None
node = onnx.helper.make_node(
'ReduceLogSum',
inputs=['data'],
outputs=["reduced"],
axes=[2, 1],
keepdims=0
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(2, 1), keepdims=False))
expect(node, inputs=[data], outputs=[reduced],
name='test_reduce_log_sum_desc_axes')
node = onnx.helper.make_node(
'ReduceLogSum',
inputs=['data'],
outputs=["reduced"],
axes=[0, 1],
keepdims=0
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(0, 1), keepdims=False))
expect(node, inputs=[data], outputs=[reduced],
name='test_reduce_log_sum_asc_axes')
@staticmethod
def export_keepdims(): # type: () -> None
node = onnx.helper.make_node(
'ReduceLogSum',
inputs=['data'],
outputs=["reduced"]
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, keepdims=True))
expect(node, inputs=[data], outputs=[reduced],
name='test_reduce_log_sum_default')
@staticmethod
def export_negative_axes_keepdims(): # type: () -> None
node = onnx.helper.make_node(
'ReduceLogSum',
inputs=['data'],
outputs=["reduced"],
axes=[-2]
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(-2,), keepdims=True))
expect(node, inputs=[data], outputs=[reduced],
name='test_reduce_log_sum_negative_axes')
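# The pattern above generalizes: ReduceLogSum is log(sum(data)) over the
# requested axes, with `keepdims` controlling whether the reduced axes are
# retained. A minimal sketch (`_reduce_log_sum_reference` is an
# illustrative name):
def _reduce_log_sum_reference(data, axes=None, keepdims=1):  # type: ignore
    axis = tuple(axes) if axes is not None else None  # None reduces all axes
    return np.log(np.sum(data, axis=axis, keepdims=bool(keepdims)))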
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Range(Base):
@staticmethod
def export_range_float_type_positive_delta(): # type: () -> None
node = onnx.helper.make_node(
'Range',
inputs=['start', 'limit', 'delta'],
outputs=['output'],
)
start = np.float32(1)
limit = np.float32(5)
delta = np.float32(2)
output = np.arange(start, limit, delta, dtype=np.float32) # expected output [1.0, 3.0]
expect(node, inputs=[start, limit, delta], outputs=[output],
name='test_range_float_type_positive_delta')
@staticmethod
def export_range_int32_type_negative_delta(): # type: () -> None
node = onnx.helper.make_node(
'Range',
inputs=['start', 'limit', 'delta'],
outputs=['output'],
)
start = np.int32(10)
limit = np.int32(6)
delta = np.int32(-3)
output = np.arange(start, limit, delta, dtype=np.int32) # expected output [10, 7]
expect(node, inputs=[start, limit, delta], outputs=[output],
name='test_range_int32_type_negative_delta')
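# For reference, the element count `np.arange` computes above matches the
# ONNX Range definition: number_of_elements = max(ceil((limit - start) /
# delta), 0) and output[i] = start + i * delta. A minimal sketch
# (`_range_reference` is an illustrative name):
def _range_reference(start, limit, delta):  # type: ignore
    n = int(max(np.ceil((limit - start) / delta), 0))
    return (start + np.arange(n) * delta).astype(np.asarray(start).dtype)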
|