diff --git a/ckpts/universal/global_step120/zero/13.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/13.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2731f4dd0c49e780f55382bf86c18ead639f06ba
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/13.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab559fe2e421839be6562b68ddd5448fffc38334078112e1ee655fea06d4bab1
+size 50332843
diff --git a/ckpts/universal/global_step120/zero/13.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step120/zero/13.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7b1cab94438c5b14a7528152785d12dc026f4c81
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/13.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff3433a9244dc6e0e3c85a85270a490863b81102c8c0ba6e471e42af102523ad
+size 50332749
diff --git a/ckpts/universal/global_step120/zero/17.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/17.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a6b89022103575ff3d11ca2d44001a886f95455c
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/17.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:263382d018932e3161eff18b2b308c104205392506b06e9c14013d7a307edab1
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04ec084f6eb0175f883214187a89d527800de0fd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/__init__.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py
new file mode 100644
index 0000000000000000000000000000000000000000..25e1c1cb72998f647cc50cc700ca35cf2f7b03fe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py
@@ -0,0 +1,131 @@
+"""An internal wrapper for the beartype library.
+
+The module returns a no-op decorator when the beartype library is not installed.
+"""
+import enum
+import functools
+import os
+import traceback
+import typing
+import warnings
+from types import ModuleType
+
+try:
+    import beartype as _beartype_lib  # type: ignore[import]
+    from beartype import roar as _roar  # type: ignore[import]
+
+    # Beartype warns when we import from typing because the types are deprecated
+    # in Python 3.9. But there will be a long time until we can move to using
+    # the native container types for type annotations (when 3.9 is the lowest
+    # supported version). So we silence the warning.
+    warnings.filterwarnings(
+        "ignore",
+        category=_roar.BeartypeDecorHintPep585DeprecationWarning,
+    )
+
+    if _beartype_lib.__version__ == "0.16.0":
+        # beartype 0.16.0 has a bug that causes it to crash when used with
+        # PyTorch. See https://github.com/beartype/beartype/issues/282
+        warnings.warn("beartype 0.16.0 is not supported. Please upgrade to 0.16.1+.")
+        _beartype_lib = None  # type: ignore[assignment]
+except ImportError:
+    _beartype_lib = None  # type: ignore[assignment]
+except Exception as e:
+    # Warn on errors that are not import errors (unexpected).
+    warnings.warn(f"{e}")
+    _beartype_lib = None  # type: ignore[assignment]
+
+
+@enum.unique
+class RuntimeTypeCheckState(enum.Enum):
+    """Runtime type check state."""
+
+    # Runtime type checking is disabled.
+    DISABLED = enum.auto()
+    # Runtime type checking is enabled but warnings are shown only.
+    WARNINGS = enum.auto()
+    # Runtime type checking is enabled.
+    ERRORS = enum.auto()
+
+
+class CallHintViolationWarning(UserWarning):
+    """Warning raised when a type hint is violated during a function call."""
+
+    pass
+
+
+def _no_op_decorator(func):
+    return func
+
+
+def _create_beartype_decorator(
+    runtime_check_state: RuntimeTypeCheckState,
+):
+    # beartype needs to be imported outside of the function and aliased because
+    # this module overwrites the name "beartype".
+
+    if runtime_check_state == RuntimeTypeCheckState.DISABLED:
+        return _no_op_decorator
+    if _beartype_lib is None:
+        # If the beartype library is not installed, return a no-op decorator
+        return _no_op_decorator
+
+    assert isinstance(_beartype_lib, ModuleType)
+
+    if runtime_check_state == RuntimeTypeCheckState.ERRORS:
+        # Enable runtime type checking which errors on any type hint violation.
+        return _beartype_lib.beartype
+
+    # Warnings only
+    def beartype(func):
+        """Warn on type hint violation."""
+
+        if "return" in func.__annotations__:
+            # Remove the return type from the func function's
+            # annotations so that the beartype decorator does not complain
+            # about the return type.
+            return_type = func.__annotations__["return"]
+            del func.__annotations__["return"]
+            beartyped = _beartype_lib.beartype(func)
+            # Restore the return type to the func function's annotations
+            func.__annotations__["return"] = return_type
+        else:
+            beartyped = _beartype_lib.beartype(func)
+
+        @functools.wraps(func)
+        def _coerce_beartype_exceptions_to_warnings(*args, **kwargs):
+            try:
+                return beartyped(*args, **kwargs)
+            except _roar.BeartypeCallHintParamViolation:
+                # Fall back to the original function if the beartype hint is violated.
+                warnings.warn(
+                    traceback.format_exc(),
+                    category=CallHintViolationWarning,
+                    stacklevel=2,
+                )
+
+            return func(*args, **kwargs)  # noqa: B012
+
+        return _coerce_beartype_exceptions_to_warnings
+
+    return beartype
+
+
+if typing.TYPE_CHECKING:
+    # This is a hack to make mypy play nicely with the beartype decorator.
+    def beartype(func):
+        return func
+
+else:
+    _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK = os.getenv(
+        "TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK"
+    )
+    if _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK == "ERRORS":
+        _runtime_type_check_state = RuntimeTypeCheckState.ERRORS
+    elif _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK == "DISABLED":
+        _runtime_type_check_state = RuntimeTypeCheckState.DISABLED
+    else:
+        _runtime_type_check_state = RuntimeTypeCheckState.WARNINGS
+    beartype = _create_beartype_decorator(_runtime_type_check_state)
+    # Make sure that the beartype decorator is enabled whichever path we took.
+    assert beartype is not None
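The wrapper above is easiest to see in action with a small sketch. This is hypothetical usage, not part of the diff, and it assumes beartype is installed and TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK is unset, so the default WARNINGS mode applies:

import warnings
from torch.onnx._internal import _beartype

@_beartype.beartype
def add(a: int, b: int) -> int:
    return a + b

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = add("1", "2")  # type: ignore[arg-type]  # hint violation: warns, then falls back
assert result == "12"  # the original, unchecked function still ran
assert any(issubclass(w.category, _beartype.CallHintViolationWarning) for w in caught)

In WARNINGS mode a bad call never raises: the decorator emits a CallHintViolationWarning carrying the beartype traceback and then returns whatever the undecorated function returns.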
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cae5b247d5cd5b88511c5be0346170bfce535969
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py
@@ -0,0 +1,21 @@
+from ._diagnostic import (
+    create_export_diagnostic_context,
+    diagnose,
+    engine,
+    export_context,
+    ExportDiagnosticEngine,
+    TorchScriptOnnxExportDiagnostic,
+)
+from ._rules import rules
+from .infra import levels
+
+__all__ = [
+    "TorchScriptOnnxExportDiagnostic",
+    "ExportDiagnosticEngine",
+    "rules",
+    "levels",
+    "engine",
+    "export_context",
+    "create_export_diagnostic_context",
+    "diagnose",
+]
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py
new file mode 100644
index 0000000000000000000000000000000000000000..09079d5e9c4a47a43af82c3b36be736eee4f3370
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py
@@ -0,0 +1,212 @@
+"""Diagnostic components for TorchScript based ONNX export, i.e. `torch.onnx.export`."""
+from __future__ import annotations
+
+import contextlib
+import gzip
+from collections.abc import Generator
+from typing import List, Optional
+
+import torch
+
+from torch.onnx._internal.diagnostics import infra
+from torch.onnx._internal.diagnostics.infra import formatter, sarif
+from torch.onnx._internal.diagnostics.infra.sarif import version as sarif_version
+from torch.utils import cpp_backtrace
+
+
+def _cpp_call_stack(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack:
+    """Returns the current C++ call stack.
+
+    This function utilizes `torch.utils.cpp_backtrace` to get the current C++ call stack.
+    The returned C++ call stack is a concatenated string of the C++ call stack frames.
+    Each frame is separated by a newline character, in the same format as
+    r"frame #[0-9]+: (?P<frame_info>.*)". More info at `c10/util/Backtrace.cpp`.
+    """
+    # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info.
+    frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split("\n")
+    frame_messages = []
+    for frame in frames:
+        segments = frame.split(":", 1)
+        if len(segments) == 2:
+            frame_messages.append(segments[1].strip())
+        else:
+            frame_messages.append("<unknown frame>")
+    return infra.Stack(
+        frames=[
+            infra.StackFrame(location=infra.Location(message=message))
+            for message in frame_messages
+        ]
+    )
+
+
+class TorchScriptOnnxExportDiagnostic(infra.Diagnostic):
+    """Base class for all export diagnostics.
+
+    This class is used to represent all export diagnostics. It is a subclass of
+    infra.Diagnostic, and adds additional methods to add more information to the
+    diagnostic.
+    """
+
+    python_call_stack: Optional[infra.Stack] = None
+    cpp_call_stack: Optional[infra.Stack] = None
+
+    def __init__(
+        self,
+        *args,
+        frames_to_skip: int = 1,
+        cpp_stack: bool = False,
+        **kwargs,
+    ) -> None:
+        super().__init__(*args, **kwargs)
+        self.python_call_stack = self.record_python_call_stack(
+            frames_to_skip=frames_to_skip
+        )
+        if cpp_stack:
+            self.cpp_call_stack = self.record_cpp_call_stack(
+                frames_to_skip=frames_to_skip
+            )
+
+    def record_cpp_call_stack(self, frames_to_skip: int) -> infra.Stack:
+        """Records the current C++ call stack in the diagnostic."""
+        # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info.
+        # No need to skip this function because python frame is not recorded
+        # in cpp call stack.
+        stack = _cpp_call_stack(frames_to_skip=frames_to_skip)
+        stack.message = "C++ call stack"
+        self.with_stack(stack)
+        return stack
+
+
+class ExportDiagnosticEngine:
+    """PyTorch ONNX Export diagnostic engine.
+
+    The only purpose of creating this class instead of using `DiagnosticContext` directly
+    is to provide a background context for `diagnose` calls inside exporter.
+
+    By design, one `torch.onnx.export` call should initialize one diagnostic context.
+    All `diagnose` calls inside exporter should be made in the context of that export.
+    However, since diagnostic context is currently being accessed via a global variable,
+    there is no guarantee that the context is properly initialized. Therefore, we need
+    to provide a default background context to fall back to, otherwise any invocation of
+    exporter internals, e.g. unit tests, will fail due to missing diagnostic context.
+    This can be removed once the pipeline for context to flow through the exporter is
+    established.
+    """
+
+    contexts: List[infra.DiagnosticContext]
+    _background_context: infra.DiagnosticContext
+
+    def __init__(self) -> None:
+        self.contexts = []
+        self._background_context = infra.DiagnosticContext(
+            name="torch.onnx",
+            version=torch.__version__,
+        )
+
+    @property
+    def background_context(self) -> infra.DiagnosticContext:
+        return self._background_context
+
+    def create_diagnostic_context(
+        self,
+        name: str,
+        version: str,
+        options: Optional[infra.DiagnosticOptions] = None,
+    ) -> infra.DiagnosticContext:
+        """Creates a new diagnostic context.
+
+        Args:
+            name: The subject name for the diagnostic context.
+            version: The subject version for the diagnostic context.
+            options: The options for the diagnostic context.
+
+        Returns:
+            A new diagnostic context.
+        """
+        if options is None:
+            options = infra.DiagnosticOptions()
+        context: infra.DiagnosticContext[infra.Diagnostic] = infra.DiagnosticContext(
+            name, version, options
+        )
+        self.contexts.append(context)
+        return context
+
+    def clear(self):
+        """Clears all diagnostic contexts."""
+        self.contexts.clear()
+        self._background_context.diagnostics.clear()
+
+    def to_json(self) -> str:
+        return formatter.sarif_to_json(self.sarif_log())
+
+    def dump(self, file_path: str, compress: bool = False) -> None:
+        """Dumps the SARIF log to a file."""
+        if compress:
+            with gzip.open(file_path, "wt") as f:
+                f.write(self.to_json())
+        else:
+            with open(file_path, "w") as f:
+                f.write(self.to_json())
+
+    def sarif_log(self):
+        log = sarif.SarifLog(
+            version=sarif_version.SARIF_VERSION,
+            schema_uri=sarif_version.SARIF_SCHEMA_LINK,
+            runs=[context.sarif() for context in self.contexts],
+        )
+
+        log.runs.append(self._background_context.sarif())
+        return log
+
+
+engine = ExportDiagnosticEngine()
+_context = engine.background_context
+
+
+@contextlib.contextmanager
+def create_export_diagnostic_context() -> (
+    Generator[infra.DiagnosticContext, None, None]
+):
+    """Create a diagnostic context for export.
+
+    This is a workaround for code robustness since diagnostic context is accessed by
+    export internals via global variable. See `ExportDiagnosticEngine` for more details.
+    """
+    global _context
+    assert (
+        _context == engine.background_context
+    ), "Export context is already set. Nested export is not supported."
+    _context = engine.create_diagnostic_context(
+        "torch.onnx.export",
+        torch.__version__,
+    )
+    try:
+        yield _context
+    finally:
+        _context = engine.background_context
+
+
+def diagnose(
+    rule: infra.Rule,
+    level: infra.Level,
+    message: Optional[str] = None,
+    frames_to_skip: int = 2,
+    **kwargs,
+) -> TorchScriptOnnxExportDiagnostic:
+    """Creates a diagnostic and records it in the global diagnostic context.
+
+    This is a wrapper around `context.log` that uses the global diagnostic
+    context.
+    """
+    # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info.
+    diagnostic = TorchScriptOnnxExportDiagnostic(
+        rule, level, message, frames_to_skip=frames_to_skip, **kwargs
+    )
+    export_context().log(diagnostic)
+    return diagnostic
+
+
+def export_context() -> infra.DiagnosticContext:
+    global _context
+    return _context
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bfda96c5bce3c8fb3b1884e2d953e17ded3bf34
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py
@@ -0,0 +1,634 @@
+"""
+GENERATED CODE - DO NOT EDIT DIRECTLY
+This file is generated by gen_diagnostics.py.
+See tools/onnx/gen_diagnostics.py for more information.
+
+Diagnostic rules for PyTorch ONNX export.
+"""
+
+import dataclasses
+from typing import Tuple
+
+# flake8: noqa
+from torch.onnx._internal.diagnostics import infra
+
+"""
+GENERATED CODE - DO NOT EDIT DIRECTLY
+The purpose of generating a class for each rule is to override the `format_message`
+method to provide more details in the signature about the format arguments.
+"""
+
+
+class _NodeMissingOnnxShapeInference(infra.Rule):
+    """Node is missing ONNX shape inference."""
+
+    def format_message(self, op_name) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.'
+        """
+        return self.message_default_template.format(op_name=op_name)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, op_name
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.'
+        """
+        return self, level, self.format_message(op_name=op_name)
+
+
+class _MissingCustomSymbolicFunction(infra.Rule):
+    """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."""
+
+    def format_message(self, op_name) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.'
+        """
+        return self.message_default_template.format(op_name=op_name)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, op_name
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.'
+        """
+        return self, level, self.format_message(op_name=op_name)
+
+
+class _MissingStandardSymbolicFunction(infra.Rule):
+    """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."""
+
+    def format_message(  # type: ignore[override]
+        self, op_name, opset_version, issue_url
+    ) -> str:
+        """Returns the formatted default message of this Rule.
+
+        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
+        """
+        return self.message_default_template.format(
+            op_name=op_name, opset_version=opset_version, issue_url=issue_url
+        )
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, op_name, opset_version, issue_url
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
+        """
+        return (
+            self,
+            level,
+            self.format_message(
+                op_name=op_name, opset_version=opset_version, issue_url=issue_url
+            ),
+        )
+
+
+class _OperatorSupportedInNewerOpsetVersion(infra.Rule):
+    """Operator is supported in newer opset version."""
+
+    def format_message(  # type: ignore[override]
+        self, op_name, opset_version, supported_opset_version
+    ) -> str:
+        """Returns the formatted default message of this Rule.
+
+        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
+        """
+        return self.message_default_template.format(
+            op_name=op_name,
+            opset_version=opset_version,
+            supported_opset_version=supported_opset_version,
+        )
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, op_name, opset_version, supported_opset_version
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
+        """
+        return (
+            self,
+            level,
+            self.format_message(
+                op_name=op_name,
+                opset_version=opset_version,
+                supported_opset_version=supported_opset_version,
+            ),
+        )
+
+
+class _FxGraphToOnnx(infra.Rule):
+    """Transforms graph from FX IR to ONNX IR."""
+
+    def format_message(self, graph_name) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'Transforming FX graph {graph_name} to ONNX graph.'
+        """
+        return self.message_default_template.format(graph_name=graph_name)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, graph_name
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'Transforming FX graph {graph_name} to ONNX graph.'
+        """
+        return self, level, self.format_message(graph_name=graph_name)
+
+
+class _FxNodeToOnnx(infra.Rule):
+    """Transforms an FX node to an ONNX node."""
+
+    def format_message(self, node_repr) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'Transforming FX node {node_repr} to ONNX node.'
+        """
+        return self.message_default_template.format(node_repr=node_repr)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, node_repr
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'Transforming FX node {node_repr} to ONNX node.'
+        """
+        return self, level, self.format_message(node_repr=node_repr)
+
+
+class _FxPass(infra.Rule):
+    """FX graph transformation during ONNX export before converting from FX IR to ONNX IR."""
+
+    def format_message(self, pass_name) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'Running {pass_name} pass.'
+        """
+        return self.message_default_template.format(pass_name=pass_name)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, pass_name
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'Running {pass_name} pass.'
+        """
+        return self, level, self.format_message(pass_name=pass_name)
+
+
+class _NoSymbolicFunctionForCallFunction(infra.Rule):
+    """Cannot find symbolic function to convert the "call_function" FX node to ONNX."""
+
+    def format_message(self, target) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. '
+        """
+        return self.message_default_template.format(target=target)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, target
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. '
+        """
+        return self, level, self.format_message(target=target)
+
+
+class _UnsupportedFxNodeAnalysis(infra.Rule):
+    """Result from FX graph analysis to reveal unsupported FX nodes."""
+
+    def format_message(  # type: ignore[override]
+        self, node_op_to_target_mapping
+    ) -> str:
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. '
+        """
+        return self.message_default_template.format(
+            node_op_to_target_mapping=node_op_to_target_mapping
+        )
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, node_op_to_target_mapping
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. '
+        """
+        return (
+            self,
+            level,
+            self.format_message(node_op_to_target_mapping=node_op_to_target_mapping),
+        )
+
+
+class _OpLevelDebugging(infra.Rule):
+    """Report any op level validation failure in warnings."""
+
+    def format_message(self, node, symbolic_fn) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.'
+        """
+        return self.message_default_template.format(node=node, symbolic_fn=symbolic_fn)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, node, symbolic_fn
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.'
+        """
+        return self, level, self.format_message(node=node, symbolic_fn=symbolic_fn)
+
+
+class _FindOpschemaMatchedSymbolicFunction(infra.Rule):
+    """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."""
+
+    def format_message(self, symbolic_fn, node) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.'
+        """
+        return self.message_default_template.format(symbolic_fn=symbolic_fn, node=node)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, symbolic_fn, node
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.'
+        """
+        return self, level, self.format_message(symbolic_fn=symbolic_fn, node=node)
+
+
+class _FxNodeInsertTypePromotion(infra.Rule):
+    """Determine if type promotion is required for the FX node. Insert cast nodes if needed."""
+
+    def format_message(self, target) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'Performing explicit type promotion for node {target}. '
+        """
+        return self.message_default_template.format(target=target)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, target
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'Performing explicit type promotion for node {target}. '
+        """
+        return self, level, self.format_message(target=target)
+
+
+class _FindOperatorOverloadsInOnnxRegistry(infra.Rule):
+    """Find the list of OnnxFunction of the PyTorch operator in onnx registry."""
+
+    def format_message(self, node) -> str:  # type: ignore[override]
+        """Returns the formatted default message of this Rule.
+
+        Message template: 'Checking if the FX node: {node} is supported in onnx registry.'
+        """
+        return self.message_default_template.format(node=node)
+
+    def format(  # type: ignore[override]
+        self, level: infra.Level, node
+    ) -> Tuple[infra.Rule, infra.Level, str]:
+        """Returns a tuple of (Rule, Level, message) for this Rule.
+
+        Message template: 'Checking if the FX node: {node} is supported in onnx registry.'
+        """
+        return self, level, self.format_message(node=node)
+
+
+@dataclasses.dataclass
+class _POERules(infra.RuleCollection):
+    node_missing_onnx_shape_inference: _NodeMissingOnnxShapeInference = dataclasses.field(
+        default=_NodeMissingOnnxShapeInference.from_sarif(
+            **{
+                "id": "POE0001",
+                "name": "node-missing-onnx-shape-inference",
+                "short_description": {"text": "Node is missing ONNX shape inference."},
+                "full_description": {
+                    "text": "Node is missing ONNX shape inference. This usually happens when the node is not valid under standard ONNX operator spec.",
+                    "markdown": "Node is missing ONNX shape inference.\nThis usually happens when the node is not valid under standard ONNX operator spec.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Node is missing ONNX shape inference."""
+
+    missing_custom_symbolic_function: _MissingCustomSymbolicFunction = dataclasses.field(
+        default=_MissingCustomSymbolicFunction.from_sarif(
+            **{
+                "id": "POE0002",
+                "name": "missing-custom-symbolic-function",
+                "short_description": {
+                    "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."
+                },
+                "full_description": {
+                    "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.",
+                    "markdown": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."""
+
+    missing_standard_symbolic_function: _MissingStandardSymbolicFunction = dataclasses.field(
+        default=_MissingStandardSymbolicFunction.from_sarif(
+            **{
+                "id": "POE0003",
+                "name": "missing-standard-symbolic-function",
+                "short_description": {
+                    "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."
+                },
+                "full_description": {
+                    "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.",
+                    "markdown": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."""
+
+    operator_supported_in_newer_opset_version: _OperatorSupportedInNewerOpsetVersion = dataclasses.field(
+        default=_OperatorSupportedInNewerOpsetVersion.from_sarif(
+            **{
+                "id": "POE0004",
+                "name": "operator-supported-in-newer-opset-version",
+                "short_description": {
+                    "text": "Operator is supported in newer opset version."
+                },
+                "full_description": {
+                    "text": "Operator is supported in newer opset version.",
+                    "markdown": "Operator is supported in newer opset version.\n\nExample:\n```python\ntorch.onnx.export(model, args, ..., opset_version=9)\n```\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Operator is supported in newer opset version."""
+
+    fx_graph_to_onnx: _FxGraphToOnnx = dataclasses.field(
+        default=_FxGraphToOnnx.from_sarif(
+            **{
+                "id": "FXE0007",
+                "name": "fx-graph-to-onnx",
+                "short_description": {
+                    "text": "Transforms graph from FX IR to ONNX IR."
+                },
+                "full_description": {
+                    "text": "Transforms graph from FX IR to ONNX IR.",
+                    "markdown": "This diagnostic tracks the transformation process from an FX Graph (in FX IR) to an ONNX Graph (in ONNX IR).\n\n## Key Representations:\n\n- **FX Graph**: The graph in FX IR produced by dynamo or symbolic tracing.\n- **ONNX Graph**: The graph in ONNX IR and [operators](https://onnx.ai/onnx/operators/).\n\n## Additional Notes:\n\n- Prior to this transformation step, the FX graph undergoes preprocessing through multiple FX passes.\n  To gain insight into these transformations, refer to diagnostic `FXE0010`.\n- To enable a detailed view of the graph transformation in progress within this diagnostic, switch to the DEBUG mode.\n\n  - Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n  - Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\n- For specific information related to node-level FX to ONNX transformations, explore the diagnostic `FXE0008`.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "Transforming FX graph {graph_name} to ONNX graph."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Transforms graph from FX IR to ONNX IR."""
+
+    fx_node_to_onnx: _FxNodeToOnnx = dataclasses.field(
+        default=_FxNodeToOnnx.from_sarif(
+            **{
+                "id": "FXE0008",
+                "name": "fx-node-to-onnx",
+                "short_description": {"text": "Transforms an FX node to an ONNX node."},
+                "full_description": {
+                    "text": "Transforms an FX node to an ONNX node.",
+                    "markdown": "This diagnostic tracks the transformation process from an FX Node to ONNX [Operators](https://onnx.ai/onnx/operators/).\n\nThe process of converting FX Node to ONNX Node involves dealing with six distinct node types:\n  1. `placeholder`: Represents a module input, maps to an ONNX graph input.\n  2. `call_module`: Symbolizes a call to a submodule, maps to an ONNX\n  3. `call_method`: Symbolizes a method call. Not yet implemented.\n  4. `call_function`: Symbolizes a function call. [Core ATen](https://pytorch.org/docs/stable/ir.html#core-aten-ir) is expected\n     as the function call target. The mapping from ATen to ONNX is implemented by [ONNXScript torchlib](https://github.com/microsoft/onnxscript/tree/main/onnxscript/function_libs/torch_lib/ops).\n     This [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) shows how to write and register a custom symbolic function for call_function FX node.\n  5. `get_attr`: Indicates an attribute access within the current module. Maps to an ONNX graph initializer.\n  6. `output`: Represents the module's output. Maps to an ONNX graph output.\n\nFor a granular understanding of how each node type is transformed, refer to the implementation details in `FxOnnxInterpreter`.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "Transforming FX node {node_repr} to ONNX node."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Transforms an FX node to an ONNX node."""
+
+    fx_pass: _FxPass = dataclasses.field(
+        default=_FxPass.from_sarif(
+            **{
+                "id": "FXE0010",
+                "name": "fx-pass",
+                "short_description": {
+                    "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR."
+                },
+                "full_description": {
+                    "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR.",
+                    "markdown": "This diagnostic tracks the FX passes executed during the ONNX export process prior\nto converting from FX IR (Intermediate Representation) to ONNX IR.\n\nUnder the scope of ONNX export, an FX pass refers to a specific transformation applied to the FX GraphModule.\nThe primary aim of these passes is to streamline the graph into a format that aligns more with the ONNX IR.\nMoreover, these passes work to substitute unsupported FX IR features with those recognized and endorsed by\nONNX IR. Common transformations include, but aren't limited to, decomposition, functionalization and\ntype promotion.\n\nFor those who are interested in a comprehensive log detailing the modifications made during these passes,\nthere are a couple of options:\n\n- Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n- Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\nHowever, it's noteworthy that by default, such detailed logging is turned off. The primary reason being\nits considerable impact on performance.\n\nFor an in-depth understanding of each specific pass, please refer to the directory: torch/onnx/_internal/fx/passes.\n",
+                },
+                "message_strings": {"default": {"text": "Running {pass_name} pass."}},
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """FX graph transformation during ONNX export before converting from FX IR to ONNX IR."""
+
+    no_symbolic_function_for_call_function: _NoSymbolicFunctionForCallFunction = dataclasses.field(
+        default=_NoSymbolicFunctionForCallFunction.from_sarif(
+            **{
+                "id": "FXE0011",
+                "name": "no-symbolic-function-for-call-function",
+                "short_description": {
+                    "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX.'
+                },
+                "full_description": {
+                    "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX. ',
+                    "markdown": 'This error occurs when the ONNX converter is unable to find a corresponding symbolic function\nto convert a "call_function" node in the input graph to its equivalence in ONNX. The "call_function"\nnode represents a normalized function call in PyTorch, such as "torch.aten.ops.add".\n\nTo resolve this error, you can try one of the following:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/tutorials/beginner/onnx/onnx_registry_tutorial.html#overview) to write and\n  register a custom symbolic function for the unsupported call_function FX node.\n',
+                },
+                "message_strings": {
+                    "default": {
+                        "text": 'No symbolic function to convert the "call_function" node {target} to ONNX. '
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Cannot find symbolic function to convert the "call_function" FX node to ONNX."""
+
+    unsupported_fx_node_analysis: _UnsupportedFxNodeAnalysis = dataclasses.field(
+        default=_UnsupportedFxNodeAnalysis.from_sarif(
+            **{
+                "id": "FXE0012",
+                "name": "unsupported-fx-node-analysis",
+                "short_description": {
+                    "text": "Result from FX graph analysis to reveal unsupported FX nodes."
+                },
+                "full_description": {
+                    "text": "Result from FX graph analysis to reveal unsupported FX nodes.",
+                    "markdown": "This error indicates that an FX graph contains one or more unsupported nodes. The error message\nis typically accompanied by a list of the unsupported nodes found during analysis.\n\nTo resolve this error, you can try resolving each individual unsupported node error by following\nthe suggestions by its diagnostic. Typically, options include:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) to write and\n  register a custom symbolic function for the unsupported call_function FX node.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "Unsupported FX nodes: {node_op_to_target_mapping}. "
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Result from FX graph analysis to reveal unsupported FX nodes."""
+
+    op_level_debugging: _OpLevelDebugging = dataclasses.field(
+        default=_OpLevelDebugging.from_sarif(
+            **{
+                "id": "FXE0013",
+                "name": "op-level-debugging",
+                "short_description": {
+                    "text": "Report any op level validation failure in warnings."
+                },
+                "full_description": {
+                    "text": "Report any op level validation failure in warnings.",
+                    "markdown": "This warning message indicates that during op level debugging, certain symbolic functions\nhave failed to match the results of torch ops when using real tensors generated from fake\ntensors. It is important to note that the symbolic functions may not necessarily be\nincorrect, as the validation process is non-deterministic and should only be used as a\nreference.\n\nThere are two categories of warnings that can be triggered:\n\n1. Non-validated operators:\n   If the warnings are caused by the following errors, they can be disregarded by users,\n   as these errors occur due to the non-deterministic nature of the validation. However,\n   it is important to be aware that the operators have not been validated.\n\n   - IndexError: Unsupported input arguments of randomized dimensions/indices(INT64).\n   - RuntimeError: Unsupported input arguments for torch ops are generated.\n   - ValueError: Arguments/keyword arguments do not match the signature of the symbolic function.\n\n2. Potentially wrong torchlib operators:\n   If the warnings are triggered by the following error, users should be aware that the symbolic functions\n   may be incorrect in dispatching or implementation. In such cases, it is recommended to report\n   the issue to the PyTorch-ONNX team, or create/register a custom symbolic function to replace the default one.\n\n   - AssertionError: The symbolic function is potentially wrong as the results do not match the results of torch ops.\n   - TypeError: The symbolic function is potentially wrong as the opschema doesn't match inputs.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Report any op level validation failure in warnings."""
+
+    find_opschema_matched_symbolic_function: _FindOpschemaMatchedSymbolicFunction = dataclasses.field(
+        default=_FindOpschemaMatchedSymbolicFunction.from_sarif(
+            **{
+                "id": "FXE0014",
+                "name": "find-opschema-matched-symbolic-function",
+                "short_description": {
+                    "text": "Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."
+                },
+                "full_description": {
+                    "text": "Find the OnnxFunction that matches the input dtypes by comparing them with their opschemas. A warning will be issued if the matched OnnxFunction is not an exact match.",
+                    "markdown": "When an ATen/Custom operator is registered and needs to be dispatched to an OnnxFunction, the input/attribute\ndtypes of the ATen/Custom operator are compared with the input/attribute dtypes of the OnnxFunction opschemas\nto find a match. However, if a perfect/exact match is not found, the dispatcher will attempt to find\nthe nearest match with the highest number of input/attribute dtypes matching the OnnxFunction opschemas, while\nissuing a warning.\n\nThere are two types of level that can be triggered in this rule:\n\n1. NOTE: A perfect match is found, and no warning is issued.\n2. WARNING: The matched OnnxFunction is not a perfect/exact match.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning,\n   as the definition of OnnxFunction schema is usually more stringent.\n2. If there are errors or mismatches in the results, it is recommended to:\n   (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n   (b) Report the issue to the PyTorch-ONNX team.\n   (c) Create/register a custom symbolic function to replace the default one.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."""
+
+    fx_node_insert_type_promotion: _FxNodeInsertTypePromotion = dataclasses.field(
+        default=_FxNodeInsertTypePromotion.from_sarif(
+            **{
+                "id": "FXE0015",
+                "name": "fx-node-insert-type-promotion",
+                "short_description": {
+                    "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed."
+                },
+                "full_description": {
+                    "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed.",
+                    "markdown": "This diagnostic monitors the node-level type promotion insertion process. In PyTorch, there is an automatic process called implicit type promotion,\nwhere the input types of an operator are promoted to a common type. The determination of the common type is based on the type promotion rule specific to each operator.\nTo learn more about PyTorch's type promotion rules, refer to the [elementwise_dtypes doc](https://github.com/pytorch/pytorch/blob/f044613f78df713fb57f70c608483c9f10ad332e/torch/_prims_common/__init__.py#L1252-L1335)\nand [torch._refs ops](https://github.com/pytorch/pytorch/blob/a475ea4542dfe961c9d097e33ab5041f61c8c17f/torch/_refs/__init__.py#L484).\n\nHowever, implicit type promotion is not supported in ONNX. Therefore, to replicate the PyTorch behavior, we need to explicitly insert cast nodes.\nThis diagnostic tracks the process of node-level type promotion insertion.\n\nThe type promotion rules used by this process can be found in `torch/onnx/_internal/fx/passes/type_promotion.py.`\nTo update or add new type promotion rules, please refer to the [Note: Update type promotion rule] section.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "Performing explicit type promotion for node {target}. "
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Determine if type promotion is required for the FX node. Insert cast nodes if needed."""
+
+    find_operator_overloads_in_onnx_registry: _FindOperatorOverloadsInOnnxRegistry = dataclasses.field(
+        default=_FindOperatorOverloadsInOnnxRegistry.from_sarif(
+            **{
+                "id": "FXE0016",
+                "name": "find-operator-overloads-in-onnx-registry",
+                "short_description": {
+                    "text": "Find the list of OnnxFunction of the PyTorch operator in onnx registry."
+                },
+                "full_description": {
+                    "text": "This rule involves finding the list of OnnxFunction for the PyTorch operator overload in the ONNX registry. If the operator overload is not supported but its default overload is, a warning will be issued. If both the operator overload and its default overload are not supported, an error will be issued.",
+                    "markdown": "The operator overload name serves the purpose of verifying whether a PyTorch operator is registered in the ONNX registry.\nIf it's not found, the dispatcher takes a fallback approach and tries to locate the default overload of the PyTorch\noperator in the registry. If even the default overload is absent, it signifies that the operator is officially unsupported.\n\nThere are three types of level that can be triggered in this rule:\n\n1. NOTE: The op overload is supported.\n2. WARNING: The op overload is not supported, but its default overload is supported.\n3. ERROR: The op overload is not supported, and its default overload is also not supported.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning.\n2. If there are errors or mismatches in the results, it is recommended to:\n   (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n   (b) Report the unsupported overload to the PyTorch-ONNX team.\n   (c) Create/register a custom symbolic function to replace the default one.\n\nHere are some suggestions based on the ERROR situation:\n\n1. Report the unsupported operator to the PyTorch-ONNX team.\n2. Create/register a custom symbolic function to replace the default one.\n",
+                },
+                "message_strings": {
+                    "default": {
+                        "text": "Checking if the FX node: {node} is supported in onnx registry."
+                    }
+                },
+                "help_uri": None,
+                "properties": {"deprecated": False, "tags": []},
+            }
+        ),
+        init=False,
+    )
+    """Find the list of OnnxFunction of the PyTorch operator in onnx registry."""
+
+
+rules = _POERules()
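Since _rules.py is generated, the per-rule format overrides are the point: each rule gets a typed signature matching its message template. A quick sketch of how the exporter consumes them (values are illustrative, not part of the diff):

from torch.onnx._internal.diagnostics import rules
from torch.onnx._internal.diagnostics.infra import levels

rule, level, message = rules.operator_supported_in_newer_opset_version.format(
    levels.ERROR,
    op_name="aten::tril",
    opset_version=13,
    supported_opset_version=14,
)
print(message)
# Exporting the operator 'aten::tril' to ONNX opset version 13 is not supported.
# Support for this operator was added in version 14, try exporting with this version.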
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__init__.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eb6bb444dff6399dee5a56e28070ad4428a0edc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/__init__.py
@@ -0,0 +1,33 @@
+from ._infra import (
+    DiagnosticOptions,
+    Graph,
+    Invocation,
+    Level,
+    levels,
+    Location,
+    Rule,
+    RuleCollection,
+    Stack,
+    StackFrame,
+    Tag,
+    ThreadFlowLocation,
+)
+from .context import Diagnostic, DiagnosticContext, RuntimeErrorWithDiagnostic
+
+__all__ = [
+    "Diagnostic",
+    "DiagnosticContext",
+    "DiagnosticOptions",
+    "Graph",
+    "Invocation",
+    "Level",
+    "levels",
+    "Location",
+    "Rule",
+    "RuleCollection",
+    "RuntimeErrorWithDiagnostic",
+    "Stack",
+    "StackFrame",
+    "Tag",
+    "ThreadFlowLocation",
+]
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/_infra.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/_infra.py
new file mode 100644
index 0000000000000000000000000000000000000000..325cdc44ac7257cb7915db084295ae645e6c1eec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/_infra.py
@@ -0,0 +1,284 @@
+"""This file defines an additional layer of abstraction on top of the SARIF OM."""
+
+from __future__ import annotations
+
+import dataclasses
+import enum
+import logging
+from typing import FrozenSet, List, Mapping, Optional, Sequence, Tuple
+
+from torch.onnx._internal.diagnostics.infra import formatter, sarif
+
+
+class Level(enum.IntEnum):
+    """The level of a diagnostic.
+
+    This class is used to represent the level of a diagnostic. The levels are defined
+    by the SARIF specification, and are not modifiable. For alternative categories,
+    please use infra.Tag instead. When selecting a level, please consider the following
+    guidelines:
+
+    - NONE: Informational result that does not indicate the presence of a problem.
+    - NOTE: An opportunity for improvement was found.
+    - WARNING: A potential problem was found.
+    - ERROR: A serious problem was found.
+
+    This level is a subclass of enum.IntEnum, and can be used as an integer. Its integer
+    value maps to the logging levels in Python's logging module. The mapping is as
+    follows:
+
+        Level.NONE = logging.DEBUG = 10
+        Level.NOTE = logging.INFO = 20
+        Level.WARNING = logging.WARNING = 30
+        Level.ERROR = logging.ERROR = 40
+    """
+
+    NONE = 10
+    NOTE = 20
+    WARNING = 30
+    ERROR = 40
+
+
+levels = Level
+
+
+class Tag(enum.Enum):
+    """The tag of a diagnostic. This class can be inherited to define custom tags."""
+
+
+class PatchedPropertyBag(sarif.PropertyBag):
+    """Key/value pairs that provide additional information about the object.
+
+    The definition of PropertyBag via SARIF spec is "A property bag is an object (§3.6)
+    containing an unordered set of properties with arbitrary names." However it is not
+    reflected in the json file, and therefore not captured by the python representation.
+    This patch adds additional **kwargs to the `__init__` method to allow recording
+    arbitrary key/value pairs.
+    """
+
+    def __init__(self, tags: Optional[List[str]] = None, **kwargs):
+        super().__init__(tags=tags)
+        self.__dict__.update(kwargs)
+
+
+@dataclasses.dataclass(frozen=True)
+class Rule:
+    id: str
+    name: str
+    message_default_template: str
+    short_description: Optional[str] = None
+    full_description: Optional[str] = None
+    full_description_markdown: Optional[str] = None
+    help_uri: Optional[str] = None
+
+    @classmethod
+    def from_sarif(cls, **kwargs):
+        """Returns a rule from the SARIF reporting descriptor."""
+        short_description = kwargs.get("short_description", {}).get("text")
+        full_description = kwargs.get("full_description", {}).get("text")
+        full_description_markdown = kwargs.get("full_description", {}).get("markdown")
+        help_uri = kwargs.get("help_uri")
+
+        rule = cls(
+            id=kwargs["id"],
+            name=kwargs["name"],
+            message_default_template=kwargs["message_strings"]["default"]["text"],
+            short_description=short_description,
+            full_description=full_description,
+            full_description_markdown=full_description_markdown,
+            help_uri=help_uri,
+        )
+        return rule
+
+    def sarif(self) -> sarif.ReportingDescriptor:
+        """Returns a SARIF reporting descriptor of this Rule."""
+        short_description = (
+            sarif.MultiformatMessageString(text=self.short_description)
+            if self.short_description is not None
+            else None
+        )
+        full_description = (
+            sarif.MultiformatMessageString(
+                text=self.full_description, markdown=self.full_description_markdown
+            )
+            if self.full_description is not None
+            else None
+        )
+        return sarif.ReportingDescriptor(
+            id=self.id,
+            name=self.name,
+            short_description=short_description,
+            full_description=full_description,
+            help_uri=self.help_uri,
+        )
+
+    def format(self, level: Level, *args, **kwargs) -> Tuple[Rule, Level, str]:
+        """Returns a tuple of (rule, level, message) for a diagnostic.
+
+        This method is used to format the message of a diagnostic. The message is
+        formatted using the default template of this rule, and the arguments passed in
+        as `*args` and `**kwargs`. The level is used to override the default level of
+        this rule.
+        """
+        return (self, level, self.format_message(*args, **kwargs))
+
+    def format_message(self, *args, **kwargs) -> str:
+        """Returns the formatted default message of this Rule.
+
+        This method should be overridden (with code generation) by subclasses to reflect
+        the exact arguments needed by the message template. This is a helper method to
+        create the default message for a diagnostic.
+        """
+        return self.message_default_template.format(*args, **kwargs)
+
+
+@dataclasses.dataclass
+class Location:
+    uri: Optional[str] = None
+    line: Optional[int] = None
+    message: Optional[str] = None
+    start_column: Optional[int] = None
+    end_column: Optional[int] = None
+    snippet: Optional[str] = None
+    function: Optional[str] = None
+
+    def sarif(self) -> sarif.Location:
+        """Returns the SARIF representation of this location."""
+        return sarif.Location(
+            physical_location=sarif.PhysicalLocation(
+                artifact_location=sarif.ArtifactLocation(uri=self.uri),
+                region=sarif.Region(
+                    start_line=self.line,
+                    start_column=self.start_column,
+                    end_column=self.end_column,
+                    snippet=sarif.ArtifactContent(text=self.snippet),
+                ),
+            ),
+            message=sarif.Message(text=self.message)
+            if self.message is not None
+            else None,
+        )
+
+
+@dataclasses.dataclass
+class StackFrame:
+    location: Location
+
+    def sarif(self) -> sarif.StackFrame:
+        """Returns the SARIF representation of this stack frame."""
+        return sarif.StackFrame(location=self.location.sarif())
+
+
+@dataclasses.dataclass
+class Stack:
+    """Records a stack trace. The frames are in order from newest to oldest stack frame."""
+
+    frames: List[StackFrame] = dataclasses.field(default_factory=list)
+    message: Optional[str] = None
+
+    def sarif(self) -> sarif.Stack:
+        """Returns the SARIF representation of this stack."""
+        return sarif.Stack(
+            frames=[frame.sarif() for frame in self.frames],
+            message=sarif.Message(text=self.message)
+            if self.message is not None
+            else None,
+        )
+
+
+@dataclasses.dataclass
+class ThreadFlowLocation:
+    """Records code location and the initial state."""
+
+    location: Location
+    state: Mapping[str, str]
+    index: int
+    stack: Optional[Stack] = None
+
+    def sarif(self) -> sarif.ThreadFlowLocation:
+        """Returns the SARIF representation of this thread flow location."""
+        return sarif.ThreadFlowLocation(
+            location=self.location.sarif(),
+            state=self.state,
+            stack=self.stack.sarif() if self.stack is not None else None,
+        )
+
+
+@dataclasses.dataclass
+class Graph:
+    """A graph of diagnostics.
+
+    This class stores the string representation of a model graph.
+    The `nodes` and `edges` fields are unused in the current implementation.
+    """
+
+    graph: str
+    name: str
+    description: Optional[str] = None
+
+    def sarif(self) -> sarif.Graph:
+        """Returns the SARIF representation of this graph."""
+        return sarif.Graph(
+            description=sarif.Message(text=self.graph),
+            properties=PatchedPropertyBag(name=self.name, description=self.description),
+        )
+
+
+@dataclasses.dataclass
+class RuleCollection:
+    _rule_id_name_set: FrozenSet[Tuple[str, str]] = dataclasses.field(init=False)
+
+    def __post_init__(self) -> None:
+        self._rule_id_name_set = frozenset(
+            {
+                (field.default.id, field.default.name)
+                for field in dataclasses.fields(self)
+                if isinstance(field.default, Rule)
+            }
+        )
+
+    def __contains__(self, rule: Rule) -> bool:
+        """Checks if the rule is in the collection."""
+        return (rule.id, rule.name) in self._rule_id_name_set
+
+    @classmethod
+    def custom_collection_from_list(
+        cls, new_collection_class_name: str, rules: Sequence[Rule]
+    ) -> RuleCollection:
+        """Creates a custom class inherited from RuleCollection with the list of rules."""
+        return dataclasses.make_dataclass(
+            new_collection_class_name,
+            [
+                (
+                    formatter.kebab_case_to_snake_case(rule.name),
+                    type(rule),
+                    dataclasses.field(default=rule),
+                )
+                for rule in rules
+            ],
+            bases=(cls,),
+        )()
+
+
+class Invocation:
+    # TODO: Implement this.
+    # Tracks top level call arguments and diagnostic options.
+    def __init__(self) -> None:
+        raise NotImplementedError()
+
+
+@dataclasses.dataclass
+class DiagnosticOptions:
+    """Options for diagnostic context.
+
+    Attributes:
+        verbosity_level: Set the amount of information logged for each diagnostics,
+            equivalent to the 'level' in Python logging module.
+        warnings_as_errors: When True, warning diagnostics are treated as error diagnostics.
+    """
+
+    verbosity_level: int = dataclasses.field(default=logging.INFO)
+    """Set the amount of information logged for each diagnostics, equivalent to the 'level' in Python logging module."""
+
+    warnings_as_errors: bool = dataclasses.field(default=False)
+    """If True, warning diagnostics are treated as error diagnostics."""
+_Diagnostic = TypeVar("_Diagnostic", bound="Diagnostic") +diagnostic_logger: logging.Logger = logging.getLogger(__name__) + + +@dataclasses.dataclass +class Diagnostic: + rule: infra.Rule + level: infra.Level + message: Optional[str] = None + locations: List[infra.Location] = dataclasses.field(default_factory=list) + stacks: List[infra.Stack] = dataclasses.field(default_factory=list) + graphs: List[infra.Graph] = dataclasses.field(default_factory=list) + thread_flow_locations: List[infra.ThreadFlowLocation] = dataclasses.field( + default_factory=list + ) + additional_messages: List[str] = dataclasses.field(default_factory=list) + tags: List[infra.Tag] = dataclasses.field(default_factory=list) + source_exception: Optional[Exception] = None + """The exception that caused this diagnostic to be created.""" + logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger) + """The logger for this diagnostic. Defaults to 'diagnostic_logger' which has the same + log level setting with `DiagnosticOptions.verbosity_level`.""" + _current_log_section_depth: int = 0 + + def __post_init__(self) -> None: + pass + + def sarif(self) -> sarif.Result: + """Returns the SARIF Result representation of this diagnostic.""" + message = self.message or self.rule.message_default_template + if self.additional_messages: + additional_message = "\n".join(self.additional_messages) + message_markdown = ( + f"{message}\n\n## Additional Message:\n\n{additional_message}" + ) + else: + message_markdown = message + + kind: Literal["informational", "fail"] = ( + "informational" if self.level == infra.Level.NONE else "fail" + ) + + sarif_result = sarif.Result( + message=sarif.Message(text=message, markdown=message_markdown), + level=self.level.name.lower(), # type: ignore[arg-type] + rule_id=self.rule.id, + kind=kind, + ) + sarif_result.locations = [location.sarif() for location in self.locations] + sarif_result.stacks = [stack.sarif() for stack in self.stacks] + sarif_result.graphs = [graph.sarif() for graph in self.graphs] + sarif_result.code_flows = [ + sarif.CodeFlow( + thread_flows=[ + sarif.ThreadFlow( + locations=[loc.sarif() for loc in self.thread_flow_locations] + ) + ] + ) + ] + sarif_result.properties = sarif.PropertyBag( + tags=[tag.value for tag in self.tags] + ) + return sarif_result + + def with_location(self: _Diagnostic, location: infra.Location) -> _Diagnostic: + """Adds a location to the diagnostic.""" + self.locations.append(location) + return self + + def with_thread_flow_location( + self: _Diagnostic, location: infra.ThreadFlowLocation + ) -> _Diagnostic: + """Adds a thread flow location to the diagnostic.""" + self.thread_flow_locations.append(location) + return self + + def with_stack(self: _Diagnostic, stack: infra.Stack) -> _Diagnostic: + """Adds a stack to the diagnostic.""" + self.stacks.append(stack) + return self + + def with_graph(self: _Diagnostic, graph: infra.Graph) -> _Diagnostic: + """Adds a graph to the diagnostic.""" + self.graphs.append(graph) + return self + + @contextlib.contextmanager + def log_section( + self, level: int, message: str, *args, **kwargs + ) -> Generator[None, None, None]: + """ + Context manager for a section of log messages, denoted by a title message and increased indentation. + + Same api as `logging.Logger.log`. + + This context manager logs the given title at the specified log level, increases the current + section depth for subsequent log messages, and ensures that the section depth is decreased + again when exiting the context. 
+ + Args: + level: The log level. + message: The title message to log. + *args: The arguments to the message. Use `LazyString` to defer the + expensive evaluation of the arguments until the message is actually logged. + **kwargs: The keyword arguments for `logging.Logger.log`. + + Yields: + None: This context manager does not yield any value. + + Example: + >>> with DiagnosticContext("DummyContext", "1.0"): + ... rule = infra.Rule("RuleID", "DummyRule", "Rule message") + ... diagnostic = Diagnostic(rule, infra.Level.WARNING) + ... with diagnostic.log_section(logging.INFO, "My Section"): + ... diagnostic.log(logging.INFO, "My Message") + ... with diagnostic.log_section(logging.INFO, "My Subsection"): + ... diagnostic.log(logging.INFO, "My Submessage") + ... diagnostic.additional_messages + ['## My Section', 'My Message', '### My Subsection', 'My Submessage'] + """ + if self.logger.isEnabledFor(level): + indented_format_message = ( + f"##{'#' * self._current_log_section_depth } {message}" + ) + self.log( + level, + indented_format_message, + *args, + **kwargs, + ) + self._current_log_section_depth += 1 + try: + yield + finally: + self._current_log_section_depth -= 1 + + def log(self, level: int, message: str, *args, **kwargs) -> None: + """Logs a message within the diagnostic. Same api as `logging.Logger.log`. + + If logger is not enabled for the given level, the message will not be logged. + Otherwise, the message will be logged and also added to the diagnostic's additional_messages. + + The default setting for `DiagnosticOptions.verbosity_level` is `logging.INFO`. Based on this default, + the log level recommendations are as follows. If you've set a different default verbosity level in your + application, please adjust accordingly: + + - logging.ERROR: Log any events leading to application failure. + - logging.WARNING: Log events that might result in application issues or failures, although not guaranteed. + - logging.INFO: Log general useful information, ensuring minimal performance overhead. + - logging.DEBUG: Log detailed debug information, which might affect performance when logged. + + Args: + level: The log level. + message: The message to log. + *args: The arguments to the message. Use `LazyString` to defer the + expensive evaluation of the arguments until the message is actually logged. + **kwargs: The keyword arguments for `logging.Logger.log`. + """ + if self.logger.isEnabledFor(level): + formatted_message = message % args + self.logger.log(level, formatted_message, **kwargs) + self.additional_messages.append(formatted_message) + + def debug(self, message: str, *args, **kwargs) -> None: + """Logs a debug message within the diagnostic. Same api as logging.Logger.debug. + + Checkout `log` for more details. + """ + self.log(logging.DEBUG, message, *args, **kwargs) + + def info(self, message: str, *args, **kwargs) -> None: + """Logs an info message within the diagnostic. Same api as logging.Logger.info. + + Checkout `log` for more details. + """ + self.log(logging.INFO, message, *args, **kwargs) + + def warning(self, message: str, *args, **kwargs) -> None: + """Logs a warning message within the diagnostic. Same api as logging.Logger.warning. + + Checkout `log` for more details. + """ + self.log(logging.WARNING, message, *args, **kwargs) + + def error(self, message: str, *args, **kwargs) -> None: + """Logs an error message within the diagnostic. Same api as logging.Logger.error. + + Checkout `log` for more details. 
+ """ + self.log(logging.ERROR, message, *args, **kwargs) + + def log_source_exception(self, level: int, exception: Exception) -> None: + """Logs a source exception within the diagnostic. + + Invokes `log_section` and `log` to log the exception in markdown section format. + """ + self.source_exception = exception + with self.log_section(level, "Exception log"): + self.log(level, "%s", formatter.lazy_format_exception(exception)) + + def record_python_call_stack(self, frames_to_skip: int) -> infra.Stack: + """Records the current Python call stack.""" + frames_to_skip += 1 # Skip this function. + stack = utils.python_call_stack(frames_to_skip=frames_to_skip) + self.with_stack(stack) + if len(stack.frames) > 0: + self.with_location(stack.frames[0].location) + return stack + + def record_python_call( + self, + fn: Callable, + state: Mapping[str, str], + message: Optional[str] = None, + frames_to_skip: int = 0, + ) -> infra.ThreadFlowLocation: + """Records a python call as one thread flow step.""" + frames_to_skip += 1 # Skip this function. + stack = utils.python_call_stack(frames_to_skip=frames_to_skip, frames_to_log=5) + location = utils.function_location(fn) + location.message = message + # Add function location to the top of the stack. + stack.frames.insert(0, infra.StackFrame(location=location)) + thread_flow_location = infra.ThreadFlowLocation( + location=location, + state=state, + index=len(self.thread_flow_locations), + stack=stack, + ) + self.with_thread_flow_location(thread_flow_location) + return thread_flow_location + + +class RuntimeErrorWithDiagnostic(RuntimeError): + """Runtime error with enclosed diagnostic information.""" + + def __init__(self, diagnostic: Diagnostic): + super().__init__(diagnostic.message) + self.diagnostic = diagnostic + + +@dataclasses.dataclass +class DiagnosticContext(Generic[_Diagnostic]): + name: str + version: str + options: infra.DiagnosticOptions = dataclasses.field( + default_factory=infra.DiagnosticOptions + ) + diagnostics: List[_Diagnostic] = dataclasses.field(init=False, default_factory=list) + # TODO(bowbao): Implement this. 
+ # _invocation: infra.Invocation = dataclasses.field(init=False) + _inflight_diagnostics: List[_Diagnostic] = dataclasses.field( + init=False, default_factory=list + ) + _previous_log_level: int = dataclasses.field(init=False, default=logging.WARNING) + logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger) + _bound_diagnostic_type: Type = dataclasses.field(init=False, default=Diagnostic) + + def __enter__(self): + self._previous_log_level = self.logger.level + self.logger.setLevel(self.options.verbosity_level) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.logger.setLevel(self._previous_log_level) + return None + + def sarif(self) -> sarif.Run: + """Returns the SARIF Run object.""" + unique_rules = {diagnostic.rule for diagnostic in self.diagnostics} + return sarif.Run( + sarif.Tool( + driver=sarif.ToolComponent( + name=self.name, + version=self.version, + rules=[rule.sarif() for rule in unique_rules], + ) + ), + results=[diagnostic.sarif() for diagnostic in self.diagnostics], + ) + + def sarif_log(self) -> sarif.SarifLog: # type: ignore[name-defined] + """Returns the SARIF Log object.""" + return sarif.SarifLog( + version=sarif_version.SARIF_VERSION, + schema_uri=sarif_version.SARIF_SCHEMA_LINK, + runs=[self.sarif()], + ) + + def to_json(self) -> str: + return formatter.sarif_to_json(self.sarif_log()) + + def dump(self, file_path: str, compress: bool = False) -> None: + """Dumps the SARIF log to a file.""" + if compress: + with gzip.open(file_path, "wt") as f: + f.write(self.to_json()) + else: + with open(file_path, "w") as f: + f.write(self.to_json()) + + def log(self, diagnostic: _Diagnostic) -> None: + """Logs a diagnostic. + + This method should be used only after all the necessary information for the diagnostic + has been collected. + + Args: + diagnostic: The diagnostic to add. + """ + if not isinstance(diagnostic, self._bound_diagnostic_type): + raise TypeError( + f"Expected diagnostic of type {self._bound_diagnostic_type}, got {type(diagnostic)}" + ) + if self.options.warnings_as_errors and diagnostic.level == infra.Level.WARNING: + diagnostic.level = infra.Level.ERROR + self.diagnostics.append(diagnostic) + + def log_and_raise_if_error(self, diagnostic: _Diagnostic) -> None: + """Logs a diagnostic and raises an exception if it is an error. + + Use this method for logging non inflight diagnostics where diagnostic level is not known or + lower than ERROR. If it is always expected raise, use `log` and explicit + `raise` instead. Otherwise there is no way to convey the message that it always + raises to Python intellisense and type checking tools. + + This method should be used only after all the necessary information for the diagnostic + has been collected. + + Args: + diagnostic: The diagnostic to add. + """ + self.log(diagnostic) + if diagnostic.level == infra.Level.ERROR: + if diagnostic.source_exception is not None: + raise diagnostic.source_exception + raise RuntimeErrorWithDiagnostic(diagnostic) + + @contextlib.contextmanager + def add_inflight_diagnostic( + self, diagnostic: _Diagnostic + ) -> Generator[_Diagnostic, None, None]: + """Adds a diagnostic to the context. + + Use this method to add diagnostics that are not created by the context. + Args: + diagnostic: The diagnostic to add. 
+ """ + self._inflight_diagnostics.append(diagnostic) + try: + yield diagnostic + finally: + self._inflight_diagnostics.pop() + + def push_inflight_diagnostic(self, diagnostic: _Diagnostic) -> None: + """Pushes a diagnostic to the inflight diagnostics stack. + + Args: + diagnostic: The diagnostic to push. + + Raises: + ValueError: If the rule is not supported by the tool. + """ + self._inflight_diagnostics.append(diagnostic) + + def pop_inflight_diagnostic(self) -> _Diagnostic: + """Pops the last diagnostic from the inflight diagnostics stack. + + Returns: + The popped diagnostic. + """ + return self._inflight_diagnostics.pop() + + def inflight_diagnostic(self, rule: Optional[infra.Rule] = None) -> _Diagnostic: + if rule is None: + # TODO(bowbao): Create builtin-rules and create diagnostic using that. + if len(self._inflight_diagnostics) <= 0: + raise AssertionError("No inflight diagnostics") + + return self._inflight_diagnostics[-1] + else: + for diagnostic in reversed(self._inflight_diagnostics): + if diagnostic.rule == rule: + return diagnostic + raise AssertionError(f"No inflight diagnostic for rule {rule.name}") diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/decorator.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..0ac803815703e258201d78417e9faeb3a1efc02d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/decorator.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import functools +import logging +import traceback +from typing import Any, Callable, Dict, Optional, Tuple, Type + +from torch.onnx._internal import _beartype +from torch.onnx._internal.diagnostics import infra +from torch.onnx._internal.diagnostics.infra import formatter, utils + + +MessageFormatterType = Callable[..., str] + + +@_beartype.beartype +def format_message_in_text(fn: Callable, *args: Any, **kwargs: Any) -> str: + return f"{formatter.display_name(fn)}. 
" + + +@_beartype.beartype +def format_exception_in_markdown(exception: Exception) -> str: + msg_list = ["### Exception log", "```"] + msg_list.extend( + traceback.format_exception(type(exception), exception, exception.__traceback__) + ) + msg_list.append("```") + return "\n".join(msg_list) + + +@_beartype.beartype +def format_function_signature_in_markdown( + fn: Callable, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + format_argument: Callable[[Any], str] = formatter.format_argument, +) -> str: + msg_list = [f"### Function Signature {formatter.display_name(fn)}"] + + state = utils.function_state(fn, args, kwargs) + + for k, v in state.items(): + msg_list.append(f"- {k}: {format_argument(v)}") + + return "\n".join(msg_list) + + +@_beartype.beartype +def format_return_values_in_markdown( + return_values: Any, + format_argument: Callable[[Any], str] = formatter.format_argument, +) -> str: + return f"{format_argument(return_values)}" + + +ModifierCallableType = Callable[ + [infra.Diagnostic, Callable, Tuple[Any, ...], Dict[str, Any], Any], None +] + + +@_beartype.beartype +def diagnose_call( + rule: infra.Rule, + *, + level: infra.Level = infra.Level.NONE, + diagnostic_type: Type[infra.Diagnostic] = infra.Diagnostic, + format_argument: Callable[[Any], str] = formatter.format_argument, + diagnostic_message_formatter: MessageFormatterType = format_message_in_text, +) -> Callable: + def decorator(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + common_error_message = "diagnose_call can only be applied to callables" + if not callable(fn): + raise AssertionError( + f"{common_error_message}. Got {type(fn)} instead of callable." + ) + arg0 = args[0] if len(args) > 0 else None + if isinstance(ctx := arg0, infra.DiagnosticContext): + pass + elif isinstance( + ctx := getattr(arg0, "diagnostic_context", None), + infra.DiagnosticContext, + ): + pass + else: + # NOTE: At decorate time, it can't tell if a callable is function or method. + # Technically both are regarded as function at that time. + raise AssertionError( + f"{common_error_message}. For {fn}, " + f"If it is a function, a DiagnosticContext instance must be present as " + f"the first argument. " + f"If it is a method, a DiagnosticContext instance must be present as " + f"the attribute 'diagnostic_context' of the 'self' argument." + ) + + diag = diagnostic_type( + rule, + level, + diagnostic_message_formatter(fn, *args, **kwargs), + ) + + # pop the decorator frame + # TODO(bowbao): by default diagnostic doesn't have stack. + # So need to check before doing this. Make the code cleaner. + # Option: do not capture stack by default in diagnostic initialization. + stack: Optional[infra.Stack] = None + if len(diag.stacks) > 0: + stack = diag.stacks[0] + stack.frames.pop(0) + + # set function location + fn_location = utils.function_location(fn) + diag.locations.insert(0, fn_location) + # Add function location to the top of the stack. 
+ if stack is not None: + stack.frames.insert(0, infra.StackFrame(location=fn_location)) + + with diag.log_section(logging.INFO, "Function Signature"): + diag.log( + logging.INFO, + "%s", + formatter.LazyString( + format_function_signature_in_markdown, + fn, + args, + kwargs, + format_argument, + ), + ) + + return_values: Any = None + with ctx.add_inflight_diagnostic(diag) as diag: + try: + return_values = fn(*args, **kwargs) + with diag.log_section(logging.INFO, "Return values"): + diag.log( + logging.INFO, + "%s", + formatter.LazyString( + format_return_values_in_markdown, + return_values, + format_argument, + ), + ) + return return_values + except Exception as e: + diag.log_source_exception(logging.ERROR, e) + diag.level = infra.Level.ERROR + finally: + ctx.log_and_raise_if_error(diag) + + return wrapper + + return decorator + + +# TODO(bowbao): decorator to report only when failed. diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/formatter.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..d5b69924f3bd3cf1862d010fa9dbc9257cf896f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/formatter.py @@ -0,0 +1,114 @@ +from __future__ import annotations + +import dataclasses +import json +import re +import traceback +from typing import Any, Callable, Dict, List, Optional, Union + +from torch._logging import LazyString +from torch.onnx._internal import _beartype +from torch.onnx._internal.diagnostics.infra import sarif + + +# A list of types in the SARIF module to support pretty printing. +# This is solely for type annotation for the functions below. +_SarifClass = Union[ + sarif.SarifLog, + sarif.Run, + sarif.ReportingDescriptor, + sarif.Result, +] + + +def lazy_format_exception(exception: Exception) -> LazyString: + return LazyString( + lambda: "\n".join( + ( + "```", + *traceback.format_exception( + type(exception), exception, exception.__traceback__ + ), + "```", + ) + ), + ) + + +@_beartype.beartype +def snake_case_to_camel_case(s: str) -> str: + splits = s.split("_") + if len(splits) <= 1: + return s + return "".join([splits[0], *map(str.capitalize, splits[1:])]) + + +@_beartype.beartype +def camel_case_to_snake_case(s: str) -> str: + return re.sub(r"([A-Z])", r"_\1", s).lower() + + +@_beartype.beartype +def kebab_case_to_snake_case(s: str) -> str: + return s.replace("-", "_") + + +@_beartype.beartype +def _convert_key( + object: Union[Dict[str, Any], Any], convert: Callable[[str], str] +) -> Union[Dict[str, Any], Any]: + """Convert and update keys in a dictionary with "convert". + + Any value that is a dictionary will be recursively updated. + Any value that is a list will be recursively searched. + + Args: + object: The object to update. + convert: The function to convert the keys, e.g. `kebab_case_to_snake_case`. + + Returns: + The updated object. + """ + if not isinstance(object, Dict): + return object + new_dict = {} + for k, v in object.items(): + new_k = convert(k) + if isinstance(v, Dict): + new_v = _convert_key(v, convert) + elif isinstance(v, List): + new_v = [_convert_key(elem, convert) for elem in v] + else: + new_v = v + if new_v is None: + # Otherwise unnecessarily bloated sarif log with "null"s. + continue + if new_v == -1: + # WAR: -1 as default value shouldn't be logged into sarif. 
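+            # The generated SARIF dataclasses use -1 as the schema default
+            # for integer fields such as `index` and `parent_index`, so a
+            # value of -1 is treated as "unset" and dropped here.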
+ continue + + new_dict[new_k] = new_v + + return new_dict + + +@_beartype.beartype +def sarif_to_json(attr_cls_obj: _SarifClass, indent: Optional[str] = " ") -> str: + dict = dataclasses.asdict(attr_cls_obj) + dict = _convert_key(dict, snake_case_to_camel_case) + return json.dumps(dict, indent=indent, separators=(",", ":")) + + +@_beartype.beartype +def format_argument(obj: Any) -> str: + return f"{type(obj)}" + + +@_beartype.beartype +def display_name(fn: Callable) -> str: + if hasattr(fn, "__qualname__"): + return fn.__qualname__ + elif hasattr(fn, "__name__"): + return fn.__name__ + else: + return str(fn) diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__init__.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34fd40e5b93879223c6d3ba97a6065ac03042fbc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__init__.py @@ -0,0 +1,100 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from torch.onnx._internal.diagnostics.infra.sarif._address import Address +from torch.onnx._internal.diagnostics.infra.sarif._artifact import Artifact +from torch.onnx._internal.diagnostics.infra.sarif._artifact_change import ArtifactChange +from torch.onnx._internal.diagnostics.infra.sarif._artifact_content import ( + ArtifactContent, +) +from torch.onnx._internal.diagnostics.infra.sarif._artifact_location import ( + ArtifactLocation, +) +from torch.onnx._internal.diagnostics.infra.sarif._attachment import Attachment +from torch.onnx._internal.diagnostics.infra.sarif._code_flow import CodeFlow +from torch.onnx._internal.diagnostics.infra.sarif._configuration_override import ( + ConfigurationOverride, +) +from torch.onnx._internal.diagnostics.infra.sarif._conversion import Conversion +from torch.onnx._internal.diagnostics.infra.sarif._edge import Edge +from torch.onnx._internal.diagnostics.infra.sarif._edge_traversal import EdgeTraversal +from torch.onnx._internal.diagnostics.infra.sarif._exception import Exception +from torch.onnx._internal.diagnostics.infra.sarif._external_properties import ( + ExternalProperties, +) +from torch.onnx._internal.diagnostics.infra.sarif._external_property_file_reference import ( + ExternalPropertyFileReference, +) +from torch.onnx._internal.diagnostics.infra.sarif._external_property_file_references import ( + ExternalPropertyFileReferences, +) +from torch.onnx._internal.diagnostics.infra.sarif._fix import Fix +from torch.onnx._internal.diagnostics.infra.sarif._graph import Graph +from torch.onnx._internal.diagnostics.infra.sarif._graph_traversal import GraphTraversal +from torch.onnx._internal.diagnostics.infra.sarif._invocation import Invocation +from torch.onnx._internal.diagnostics.infra.sarif._location import Location +from torch.onnx._internal.diagnostics.infra.sarif._location_relationship import ( + LocationRelationship, +) +from torch.onnx._internal.diagnostics.infra.sarif._logical_location import ( + LogicalLocation, +) +from torch.onnx._internal.diagnostics.infra.sarif._message import Message +from torch.onnx._internal.diagnostics.infra.sarif._multiformat_message_string import ( + MultiformatMessageString, +) +from torch.onnx._internal.diagnostics.infra.sarif._node import Node +from torch.onnx._internal.diagnostics.infra.sarif._notification import Notification +from 
torch.onnx._internal.diagnostics.infra.sarif._physical_location import ( + PhysicalLocation, +) +from torch.onnx._internal.diagnostics.infra.sarif._property_bag import PropertyBag +from torch.onnx._internal.diagnostics.infra.sarif._rectangle import Rectangle +from torch.onnx._internal.diagnostics.infra.sarif._region import Region +from torch.onnx._internal.diagnostics.infra.sarif._replacement import Replacement +from torch.onnx._internal.diagnostics.infra.sarif._reporting_configuration import ( + ReportingConfiguration, +) +from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor import ( + ReportingDescriptor, +) +from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor_reference import ( + ReportingDescriptorReference, +) +from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor_relationship import ( + ReportingDescriptorRelationship, +) +from torch.onnx._internal.diagnostics.infra.sarif._result import Result +from torch.onnx._internal.diagnostics.infra.sarif._result_provenance import ( + ResultProvenance, +) +from torch.onnx._internal.diagnostics.infra.sarif._run import Run +from torch.onnx._internal.diagnostics.infra.sarif._run_automation_details import ( + RunAutomationDetails, +) +from torch.onnx._internal.diagnostics.infra.sarif._sarif_log import SarifLog +from torch.onnx._internal.diagnostics.infra.sarif._special_locations import ( + SpecialLocations, +) +from torch.onnx._internal.diagnostics.infra.sarif._stack import Stack +from torch.onnx._internal.diagnostics.infra.sarif._stack_frame import StackFrame +from torch.onnx._internal.diagnostics.infra.sarif._suppression import Suppression +from torch.onnx._internal.diagnostics.infra.sarif._thread_flow import ThreadFlow +from torch.onnx._internal.diagnostics.infra.sarif._thread_flow_location import ( + ThreadFlowLocation, +) +from torch.onnx._internal.diagnostics.infra.sarif._tool import Tool +from torch.onnx._internal.diagnostics.infra.sarif._tool_component import ToolComponent +from torch.onnx._internal.diagnostics.infra.sarif._tool_component_reference import ( + ToolComponentReference, +) +from torch.onnx._internal.diagnostics.infra.sarif._translation_metadata import ( + TranslationMetadata, +) +from torch.onnx._internal.diagnostics.infra.sarif._version_control_details import ( + VersionControlDetails, +) +from torch.onnx._internal.diagnostics.infra.sarif._web_request import WebRequest +from torch.onnx._internal.diagnostics.infra.sarif._web_response import WebResponse + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0410417109fbea1dbd1f0b6a285645a6bcf12fc8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_fix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_physical_location.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_physical_location.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fd19bdc6102621e3c6c8244e2819bec5a8d79ef Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/__pycache__/_physical_location.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_address.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_address.py new file mode 100644 index 0000000000000000000000000000000000000000..df68b103374ae9af848c44ea41030967951c9be8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_address.py @@ -0,0 +1,48 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag + + +@dataclasses.dataclass +class Address(object): + """A physical or virtual address, or a range of addresses, in an 'addressable region' (memory or a binary file).""" + + absolute_address: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "absoluteAddress"} + ) + fully_qualified_name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "fullyQualifiedName"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + kind: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "kind"} + ) + length: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "length"} + ) + name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "name"} + ) + offset_from_parent: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "offsetFromParent"} + ) + parent_index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "parentIndex"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + relative_address: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "relativeAddress"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact.py new file mode 100644 index 0000000000000000000000000000000000000000..2f66167772488624c6fba14c0de36a0ffb846f0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact.py @@ -0,0 +1,88 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Any, List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_content, + _artifact_location, + _message, + _property_bag, +) + + +@dataclasses.dataclass +class Artifact(object): + """A single artifact. 
In some cases, this artifact might be nested within another artifact.""" + + contents: Optional[_artifact_content.ArtifactContent] = dataclasses.field( + default=None, metadata={"schema_property_name": "contents"} + ) + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + encoding: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "encoding"} + ) + hashes: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "hashes"} + ) + last_modified_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "lastModifiedTimeUtc"} + ) + length: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "length"} + ) + location: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "location"} + ) + mime_type: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "mimeType"} + ) + offset: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "offset"} + ) + parent_index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "parentIndex"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + roles: Optional[ + List[ + Literal[ + "analysisTarget", + "attachment", + "responseFile", + "resultFile", + "standardStream", + "tracedFile", + "unmodified", + "modified", + "added", + "deleted", + "renamed", + "uncontrolled", + "driver", + "extension", + "translation", + "taxonomy", + "policy", + "referencedOnCommandLine", + "memoryContents", + "directory", + "userSpecifiedConfiguration", + "toolSpecifiedConfiguration", + "debugOutputFile", + ] + ] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "roles"}) + source_language: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "sourceLanguage"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_change.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_change.py new file mode 100644 index 0000000000000000000000000000000000000000..f8cca329f25b13bceb6469e5a59321bcc29c643e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_change.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
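The generated dataclasses are plain containers; a hedged sketch of how they compose (illustrative only, not part of this diff; the file name is a placeholder):

```python
# Illustrative sketch only; "model.onnx" is a placeholder.
import dataclasses

from torch.onnx._internal.diagnostics.infra import sarif

location = sarif.ArtifactLocation(uri="model.onnx")
artifact = sarif.Artifact(location=location, mime_type="application/octet-stream")

# dataclasses.asdict yields the snake_case dict; formatter.sarif_to_json
# (earlier in this diff) camelCases the keys and drops None/-1 defaults.
print(dataclasses.asdict(artifact)["location"]["uri"])  # model.onnx
```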
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _property_bag, + _replacement, +) + + +@dataclasses.dataclass +class ArtifactChange(object): + """A change to a single artifact.""" + + artifact_location: _artifact_location.ArtifactLocation = dataclasses.field( + metadata={"schema_property_name": "artifactLocation"} + ) + replacements: List[_replacement.Replacement] = dataclasses.field( + metadata={"schema_property_name": "replacements"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_content.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_content.py new file mode 100644 index 0000000000000000000000000000000000000000..134c89841bf2148930385cfa0ca8a521d500f06f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_content.py @@ -0,0 +1,33 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _multiformat_message_string, + _property_bag, +) + + +@dataclasses.dataclass +class ArtifactContent(object): + """Represents the contents of an artifact.""" + + binary: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "binary"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + rendered: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field(default=None, metadata={"schema_property_name": "rendered"}) + text: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "text"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_location.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_location.py new file mode 100644 index 0000000000000000000000000000000000000000..96e6dbba31599933fb054b95eb862a534d912996 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_artifact_location.py @@ -0,0 +1,33 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag + + +@dataclasses.dataclass +class ArtifactLocation(object): + """Specifies the location of an artifact.""" + + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "uri"} + ) + uri_base_id: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "uriBaseId"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_attachment.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_attachment.py new file mode 100644 index 0000000000000000000000000000000000000000..4e5ee6d13fadca1d7028b148d877a29a7e94c69e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_attachment.py @@ -0,0 +1,39 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _message, + _property_bag, + _rectangle, + _region, +) + + +@dataclasses.dataclass +class Attachment(object): + """An artifact relevant to a result.""" + + artifact_location: _artifact_location.ArtifactLocation = dataclasses.field( + metadata={"schema_property_name": "artifactLocation"} + ) + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + rectangles: Optional[List[_rectangle.Rectangle]] = dataclasses.field( + default=None, metadata={"schema_property_name": "rectangles"} + ) + regions: Optional[List[_region.Region]] = dataclasses.field( + default=None, metadata={"schema_property_name": "regions"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_code_flow.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_code_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..5515ef78bee12bc129aeb41da6d1a8db2c3e0810 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_code_flow.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _message, + _property_bag, + _thread_flow, +) + + +@dataclasses.dataclass +class CodeFlow(object): + """A set of threadFlows which together describe a pattern of code execution relevant to detecting a result.""" + + thread_flows: List[_thread_flow.ThreadFlow] = dataclasses.field( + metadata={"schema_property_name": "threadFlows"} + ) + message: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_configuration_override.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_configuration_override.py new file mode 100644 index 0000000000000000000000000000000000000000..be32e77ff4e18c10b9a8576c857594133c9d3613 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_configuration_override.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _property_bag, + _reporting_configuration, + _reporting_descriptor_reference, +) + + +@dataclasses.dataclass +class ConfigurationOverride(object): + """Information about how a specific rule or notification was reconfigured at runtime.""" + + configuration: _reporting_configuration.ReportingConfiguration = dataclasses.field( + metadata={"schema_property_name": "configuration"} + ) + descriptor: _reporting_descriptor_reference.ReportingDescriptorReference = ( + dataclasses.field(metadata={"schema_property_name": "descriptor"}) + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_conversion.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..522202bc78ddd27ec9dba72469b4fb4d4f251f86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_conversion.py @@ -0,0 +1,35 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
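`CodeFlow` nests thread flows the same way `Diagnostic.sarif()` assembles them above; a minimal hedged sketch (illustrative only, not part of this diff):

```python
# Illustrative sketch only.
from torch.onnx._internal.diagnostics.infra import sarif

step = sarif.ThreadFlowLocation(location=sarif.Location())
flow = sarif.CodeFlow(thread_flows=[sarif.ThreadFlow(locations=[step])])
```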
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _invocation, + _property_bag, + _tool, +) + + +@dataclasses.dataclass +class Conversion(object): + """Describes how a converter transformed the output of a static analysis tool from the analysis tool's native output format into the SARIF format.""" + + tool: _tool.Tool = dataclasses.field(metadata={"schema_property_name": "tool"}) + analysis_tool_log_files: Optional[ + List[_artifact_location.ArtifactLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "analysisToolLogFiles"} + ) + invocation: Optional[_invocation.Invocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "invocation"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge.py new file mode 100644 index 0000000000000000000000000000000000000000..f85ec8dd99c424419dbaa1ea6cb2ebb1690b4f92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag + + +@dataclasses.dataclass +class Edge(object): + """Represents a directed edge in a graph.""" + + id: str = dataclasses.field(metadata={"schema_property_name": "id"}) + source_node_id: str = dataclasses.field( + metadata={"schema_property_name": "sourceNodeId"} + ) + target_node_id: str = dataclasses.field( + metadata={"schema_property_name": "targetNodeId"} + ) + label: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "label"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge_traversal.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge_traversal.py new file mode 100644 index 0000000000000000000000000000000000000000..a8f12d921704b55654991fb0ff7e3ca73aa63f4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_edge_traversal.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import Any, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag + + +@dataclasses.dataclass +class EdgeTraversal(object): + """Represents the traversal of a single edge during a graph traversal.""" + + edge_id: str = dataclasses.field(metadata={"schema_property_name": "edgeId"}) + final_state: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "finalState"} + ) + message: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + step_over_edge_count: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "stepOverEdgeCount"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_exception.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_exception.py new file mode 100644 index 0000000000000000000000000000000000000000..9afa806332413726547a5d71d86d45e5a20c18a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_exception.py @@ -0,0 +1,37 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _exception, + _property_bag, + _stack, +) + + +@dataclasses.dataclass +class Exception(object): + """Describes a runtime exception encountered during the execution of an analysis tool.""" + + inner_exceptions: Optional[List[_exception.Exception]] = dataclasses.field( + default=None, metadata={"schema_property_name": "innerExceptions"} + ) + kind: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "kind"} + ) + message: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + stack: Optional[_stack.Stack] = dataclasses.field( + default=None, metadata={"schema_property_name": "stack"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_properties.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_properties.py new file mode 100644 index 0000000000000000000000000000000000000000..ae5a530a090f59870c2faf927f1863b344aeab2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_properties.py @@ -0,0 +1,98 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _address, + _artifact, + _conversion, + _graph, + _invocation, + _logical_location, + _property_bag, + _result, + _thread_flow_location, + _tool_component, + _web_request, + _web_response, +) + + +@dataclasses.dataclass +class ExternalProperties(object): + """The top-level element of an external property file.""" + + addresses: Optional[List[_address.Address]] = dataclasses.field( + default=None, metadata={"schema_property_name": "addresses"} + ) + artifacts: Optional[List[_artifact.Artifact]] = dataclasses.field( + default=None, metadata={"schema_property_name": "artifacts"} + ) + conversion: Optional[_conversion.Conversion] = dataclasses.field( + default=None, metadata={"schema_property_name": "conversion"} + ) + driver: Optional[_tool_component.ToolComponent] = dataclasses.field( + default=None, metadata={"schema_property_name": "driver"} + ) + extensions: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "extensions"} + ) + externalized_properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "externalizedProperties"} + ) + graphs: Optional[List[_graph.Graph]] = dataclasses.field( + default=None, metadata={"schema_property_name": "graphs"} + ) + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + invocations: Optional[List[_invocation.Invocation]] = dataclasses.field( + default=None, metadata={"schema_property_name": "invocations"} + ) + logical_locations: Optional[ + List[_logical_location.LogicalLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "logicalLocations"} + ) + policies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "policies"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + results: Optional[List[_result.Result]] = dataclasses.field( + default=None, metadata={"schema_property_name": "results"} + ) + run_guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "runGuid"} + ) + schema: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "schema"} + ) + taxonomies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "taxonomies"} + ) + thread_flow_locations: Optional[ + List[_thread_flow_location.ThreadFlowLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "threadFlowLocations"} + ) + translations: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "translations"} + ) + version: Optional[Literal["2.1.0"]] = dataclasses.field( + default=None, metadata={"schema_property_name": "version"} + ) + web_requests: Optional[List[_web_request.WebRequest]] = dataclasses.field( + default=None, metadata={"schema_property_name": "webRequests"} + ) + web_responses: Optional[List[_web_response.WebResponse]] = dataclasses.field( + default=None, metadata={"schema_property_name": "webResponses"} + ) + + +# flake8: noqa diff --git 
a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_reference.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..13a472fec9a34f640331ed2310b6a4c933fdbc9f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_reference.py @@ -0,0 +1,33 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _property_bag, +) + + +@dataclasses.dataclass +class ExternalPropertyFileReference(object): + """Contains information that enables a SARIF consumer to locate the external property file that contains the value of an externalized property associated with the run.""" + + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + item_count: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "itemCount"} + ) + location: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "location"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_references.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_references.py new file mode 100644 index 0000000000000000000000000000000000000000..78ae2db62708adb20fd8e9246854c56d7db8a3b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_external_property_file_references.py @@ -0,0 +1,86 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _external_property_file_reference, + _property_bag, +) + + +@dataclasses.dataclass +class ExternalPropertyFileReferences(object): + """References to external property files that should be inlined with the content of a root log file.""" + + addresses: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "addresses"}) + artifacts: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "artifacts"}) + conversion: Optional[ + _external_property_file_reference.ExternalPropertyFileReference + ] = dataclasses.field(default=None, metadata={"schema_property_name": "conversion"}) + driver: Optional[ + _external_property_file_reference.ExternalPropertyFileReference + ] = dataclasses.field(default=None, metadata={"schema_property_name": "driver"}) + extensions: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "extensions"}) + externalized_properties: Optional[ + _external_property_file_reference.ExternalPropertyFileReference + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "externalizedProperties"} + ) + graphs: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "graphs"}) + invocations: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "invocations"} + ) + logical_locations: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "logicalLocations"} + ) + policies: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "policies"}) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + results: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "results"}) + taxonomies: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "taxonomies"}) + thread_flow_locations: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "threadFlowLocations"} + ) + translations: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "translations"} + ) + web_requests: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "webRequests"} + ) + web_responses: Optional[ + List[_external_property_file_reference.ExternalPropertyFileReference] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "webResponses"} + ) + + +# flake8: noqa diff 
--git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_fix.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_fix.py new file mode 100644 index 0000000000000000000000000000000000000000..5e3b944aa23983b9fbaa1e7015921124ae0f7c75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_fix.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_change, + _message, + _property_bag, +) + + +@dataclasses.dataclass +class Fix(object): + """A proposed fix for the problem represented by a result object. A fix specifies a set of artifacts to modify. For each artifact, it specifies a set of bytes to remove, and provides a set of new bytes to replace them.""" + + artifact_changes: List[_artifact_change.ArtifactChange] = dataclasses.field( + metadata={"schema_property_name": "artifactChanges"} + ) + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..306d6b305126a796e9f67c0b69d81ec0f22fad40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph.py @@ -0,0 +1,35 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _edge, + _message, + _node, + _property_bag, +) + + +@dataclasses.dataclass +class Graph(object): + """A network of nodes and directed edges that describes some aspect of the structure of the code (for example, a call graph).""" + + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + edges: Optional[List[_edge.Edge]] = dataclasses.field( + default=None, metadata={"schema_property_name": "edges"} + ) + nodes: Optional[List[_node.Node]] = dataclasses.field( + default=None, metadata={"schema_property_name": "nodes"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph_traversal.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph_traversal.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc25c4591a2da71cdde040731ee1a472a4cca7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_graph_traversal.py @@ -0,0 +1,43 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
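A minimal sketch of how the generated dataclasses above compose, assuming only what this diff shows: every Graph property is optional, and Node (defined later in this same diff) requires only `id`. _edge.Edge is not part of this hunk, so edges are omitted.

from torch.onnx._internal.diagnostics.infra.sarif import _graph, _message, _node

# Hypothetical illustration, not part of the generated sources.
call_graph = _graph.Graph(
    description=_message.Message(text="call graph"),
    nodes=[_node.Node(id="n0"), _node.Node(id="n1")],
)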
+ +from __future__ import annotations + +import dataclasses +from typing import Any, List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _edge_traversal, + _message, + _property_bag, +) + + +@dataclasses.dataclass +class GraphTraversal(object): + """Represents a path through a graph.""" + + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + edge_traversals: Optional[List[_edge_traversal.EdgeTraversal]] = dataclasses.field( + default=None, metadata={"schema_property_name": "edgeTraversals"} + ) + immutable_state: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "immutableState"} + ) + initial_state: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "initialState"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + result_graph_index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "resultGraphIndex"} + ) + run_graph_index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "runGraphIndex"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_invocation.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_invocation.py new file mode 100644 index 0000000000000000000000000000000000000000..77fb36997507483f20fc89154f2b90532abbcc94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_invocation.py @@ -0,0 +1,117 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
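Two conventions visible in these generated classes are worth calling out: every field carries `schema_property_name` metadata mapping the snake_case Python name to the camelCase SARIF property, and integer indices such as `result_graph_index` use -1 as a "not set" sentinel. A sketch of a serializer driven purely by that metadata (an illustration, not the library's own formatter):

import dataclasses
from typing import Any


def to_sarif_dict(obj: Any) -> Any:
    """Recursively render a generated SARIF dataclass as a JSON-ready dict."""
    if dataclasses.is_dataclass(obj):
        out = {}
        for f in dataclasses.fields(obj):
            value = getattr(obj, f.name)
            if value is None or value == -1:
                continue  # skip unset optionals and the -1 index sentinels
            out[f.metadata.get("schema_property_name", f.name)] = to_sarif_dict(value)
        return out
    if isinstance(obj, list):
        return [to_sarif_dict(v) for v in obj]
    return obj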
+ +from __future__ import annotations + +import dataclasses +from typing import Any, List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _configuration_override, + _notification, + _property_bag, +) + + +@dataclasses.dataclass +class Invocation(object): + """The runtime environment of the analysis tool run.""" + + execution_successful: bool = dataclasses.field( + metadata={"schema_property_name": "executionSuccessful"} + ) + account: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "account"} + ) + arguments: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "arguments"} + ) + command_line: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "commandLine"} + ) + end_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "endTimeUtc"} + ) + environment_variables: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "environmentVariables"} + ) + executable_location: Optional[ + _artifact_location.ArtifactLocation + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "executableLocation"} + ) + exit_code: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitCode"} + ) + exit_code_description: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitCodeDescription"} + ) + exit_signal_name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitSignalName"} + ) + exit_signal_number: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "exitSignalNumber"} + ) + machine: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "machine"} + ) + notification_configuration_overrides: Optional[ + List[_configuration_override.ConfigurationOverride] + ] = dataclasses.field( + default=None, + metadata={"schema_property_name": "notificationConfigurationOverrides"}, + ) + process_id: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "processId"} + ) + process_start_failure_message: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "processStartFailureMessage"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + response_files: Optional[ + List[_artifact_location.ArtifactLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "responseFiles"} + ) + rule_configuration_overrides: Optional[ + List[_configuration_override.ConfigurationOverride] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "ruleConfigurationOverrides"} + ) + start_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "startTimeUtc"} + ) + stderr: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "stderr"} + ) + stdin: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "stdin"} + ) + stdout: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "stdout"} + ) + stdout_stderr: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": 
"stdoutStderr"} + ) + tool_configuration_notifications: Optional[ + List[_notification.Notification] + ] = dataclasses.field( + default=None, + metadata={"schema_property_name": "toolConfigurationNotifications"}, + ) + tool_execution_notifications: Optional[ + List[_notification.Notification] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "toolExecutionNotifications"} + ) + working_directory: Optional[ + _artifact_location.ArtifactLocation + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "workingDirectory"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location.py new file mode 100644 index 0000000000000000000000000000000000000000..06ce42546e125384f6191f378cc8925c99d7e8a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location.py @@ -0,0 +1,50 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _location_relationship, + _logical_location, + _message, + _physical_location, + _property_bag, + _region, +) + + +@dataclasses.dataclass +class Location(object): + """A location within a programming artifact.""" + + annotations: Optional[List[_region.Region]] = dataclasses.field( + default=None, metadata={"schema_property_name": "annotations"} + ) + id: int = dataclasses.field(default=-1, metadata={"schema_property_name": "id"}) + logical_locations: Optional[ + List[_logical_location.LogicalLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "logicalLocations"} + ) + message: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + physical_location: Optional[ + _physical_location.PhysicalLocation + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "physicalLocation"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + relationships: Optional[ + List[_location_relationship.LocationRelationship] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "relationships"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location_relationship.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location_relationship.py new file mode 100644 index 0000000000000000000000000000000000000000..92f6c4128e04b8e51e832deaeb82debf48ee02f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_location_relationship.py @@ -0,0 +1,28 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag + + +@dataclasses.dataclass +class LocationRelationship(object): + """Information about the relation of one location to another.""" + + target: int = dataclasses.field(metadata={"schema_property_name": "target"}) + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + kinds: List[str] = dataclasses.field( + default_factory=lambda: ["relevant"], metadata={"schema_property_name": "kinds"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_logical_location.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_logical_location.py new file mode 100644 index 0000000000000000000000000000000000000000..70e67528a95a811e0b2d1ce9a83171728c88d758 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_logical_location.py @@ -0,0 +1,39 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag + + +@dataclasses.dataclass +class LogicalLocation(object): + """A logical location of a construct that produced a result.""" + + decorated_name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "decoratedName"} + ) + fully_qualified_name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "fullyQualifiedName"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + kind: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "kind"} + ) + name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "name"} + ) + parent_index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "parentIndex"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_message.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_message.py new file mode 100644 index 0000000000000000000000000000000000000000..03528747fa5abbb2aa0a62cbefdd2af4158bc9b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_message.py @@ -0,0 +1,33 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
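LogicalLocation is all-optional, with -1 sentinels for `index` and `parent_index`; a minimal sketch with hypothetical values:

from torch.onnx._internal.diagnostics.infra.sarif import _logical_location

lloc = _logical_location.LogicalLocation(
    fully_qualified_name="MyModule.forward",  # hypothetical name
    kind="function",  # free-form string; "function" is one of the kinds SARIF suggests
)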
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag + + +@dataclasses.dataclass +class Message(object): + """Encapsulates a message intended to be read by the end user.""" + + arguments: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "arguments"} + ) + id: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "id"} + ) + markdown: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "markdown"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + text: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "text"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_multiformat_message_string.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_multiformat_message_string.py new file mode 100644 index 0000000000000000000000000000000000000000..c1dede2923f4bf01a913a3920dc3f5d2f12befc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_multiformat_message_string.py @@ -0,0 +1,25 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag + + +@dataclasses.dataclass +class MultiformatMessageString(object): + """A message string or message format string rendered in multiple formats.""" + + text: str = dataclasses.field(metadata={"schema_property_name": "text"}) + markdown: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "markdown"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_node.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_node.py new file mode 100644 index 0000000000000000000000000000000000000000..a4b3f74db3a2930a21db7da4c9e23469d63e4ca5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_node.py @@ -0,0 +1,36 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
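Message leaves every property optional (plain `text` is the common case), whereas MultiformatMessageString requires `text`; for example:

from torch.onnx._internal.diagnostics.infra.sarif import (
    _message,
    _multiformat_message_string,
)

plain = _message.Message(text="Unsupported operator.")
rich = _multiformat_message_string.MultiformatMessageString(
    text="Unsupported operator.",
    markdown="**Unsupported** operator.",
)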
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _location, + _message, + _node, + _property_bag, +) + + +@dataclasses.dataclass +class Node(object): + """Represents a node in a graph.""" + + id: str = dataclasses.field(metadata={"schema_property_name": "id"}) + children: Optional[List[_node.Node]] = dataclasses.field( + default=None, metadata={"schema_property_name": "children"} + ) + label: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "label"} + ) + location: Optional[_location.Location] = dataclasses.field( + default=None, metadata={"schema_property_name": "location"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_notification.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_notification.py new file mode 100644 index 0000000000000000000000000000000000000000..9ffb40b4d19bb1c2e1d4311dadee56573b471ac8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_notification.py @@ -0,0 +1,53 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _exception, + _location, + _message, + _property_bag, + _reporting_descriptor_reference, +) + + +@dataclasses.dataclass +class Notification(object): + """Describes a condition relevant to the tool itself, as opposed to being relevant to a target being analyzed by the tool.""" + + message: _message.Message = dataclasses.field( + metadata={"schema_property_name": "message"} + ) + associated_rule: Optional[ + _reporting_descriptor_reference.ReportingDescriptorReference + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "associatedRule"} + ) + descriptor: Optional[ + _reporting_descriptor_reference.ReportingDescriptorReference + ] = dataclasses.field(default=None, metadata={"schema_property_name": "descriptor"}) + exception: Optional[_exception.Exception] = dataclasses.field( + default=None, metadata={"schema_property_name": "exception"} + ) + level: Literal["none", "note", "warning", "error"] = dataclasses.field( + default="warning", metadata={"schema_property_name": "level"} + ) + locations: Optional[List[_location.Location]] = dataclasses.field( + default=None, metadata={"schema_property_name": "locations"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + thread_id: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "threadId"} + ) + time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "timeUtc"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_physical_location.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_physical_location.py new file mode 100644 index 0000000000000000000000000000000000000000..bd527d3ecd965fb0962901601c88af4ca24d092c --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_physical_location.py @@ -0,0 +1,40 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _address, + _artifact_location, + _property_bag, + _region, +) + + +@dataclasses.dataclass +class PhysicalLocation(object): + """A physical location relevant to a result. Specifies a reference to a programming artifact together with a range of bytes or characters within that artifact.""" + + address: Optional[_address.Address] = dataclasses.field( + default=None, metadata={"schema_property_name": "address"} + ) + artifact_location: Optional[ + _artifact_location.ArtifactLocation + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "artifactLocation"} + ) + context_region: Optional[_region.Region] = dataclasses.field( + default=None, metadata={"schema_property_name": "contextRegion"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + region: Optional[_region.Region] = dataclasses.field( + default=None, metadata={"schema_property_name": "region"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_property_bag.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_property_bag.py new file mode 100644 index 0000000000000000000000000000000000000000..eb576b4dbd11c45e9588d79681f0d3e97f93f965 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_property_bag.py @@ -0,0 +1,19 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + + +@dataclasses.dataclass +class PropertyBag(object): + """Key/value pairs that provide additional information about the object.""" + + tags: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "tags"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_rectangle.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_rectangle.py new file mode 100644 index 0000000000000000000000000000000000000000..cf24d758252657d2e415cbe39dd2bc1a9aa51684 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_rectangle.py @@ -0,0 +1,36 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
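Note that PropertyBag here models only `tags`; in SARIF a property bag may carry arbitrary key/value pairs, but the generated class exposes just the tagged subset:

from torch.onnx._internal.diagnostics.infra.sarif import _property_bag

bag = _property_bag.PropertyBag(tags=["onnx", "diagnostics"])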
+ +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag + + +@dataclasses.dataclass +class Rectangle(object): + """An area within an image.""" + + bottom: Optional[float] = dataclasses.field( + default=None, metadata={"schema_property_name": "bottom"} + ) + left: Optional[float] = dataclasses.field( + default=None, metadata={"schema_property_name": "left"} + ) + message: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + right: Optional[float] = dataclasses.field( + default=None, metadata={"schema_property_name": "right"} + ) + top: Optional[float] = dataclasses.field( + default=None, metadata={"schema_property_name": "top"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_region.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_region.py new file mode 100644 index 0000000000000000000000000000000000000000..658fdb121734ee7f3af7a8fbc1da82ac2553a461 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_region.py @@ -0,0 +1,58 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_content, + _message, + _property_bag, +) + + +@dataclasses.dataclass +class Region(object): + """A region within an artifact where a result was detected.""" + + byte_length: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "byteLength"} + ) + byte_offset: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "byteOffset"} + ) + char_length: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "charLength"} + ) + char_offset: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "charOffset"} + ) + end_column: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "endColumn"} + ) + end_line: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "endLine"} + ) + message: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + snippet: Optional[_artifact_content.ArtifactContent] = dataclasses.field( + default=None, metadata={"schema_property_name": "snippet"} + ) + source_language: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "sourceLanguage"} + ) + start_column: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "startColumn"} + ) + start_line: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "startLine"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_replacement.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_replacement.py new file mode 
100644 index 0000000000000000000000000000000000000000..9acbc9d8133a118c3aa215ee42f7767178f6d19b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_replacement.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_content, + _property_bag, + _region, +) + + +@dataclasses.dataclass +class Replacement(object): + """The replacement of a single region of an artifact.""" + + deleted_region: _region.Region = dataclasses.field( + metadata={"schema_property_name": "deletedRegion"} + ) + inserted_content: Optional[_artifact_content.ArtifactContent] = dataclasses.field( + default=None, metadata={"schema_property_name": "insertedContent"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_configuration.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc74a9fb35b7c7ba80d7534f1dd0b9dfca765ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_configuration.py @@ -0,0 +1,33 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag + + +@dataclasses.dataclass +class ReportingConfiguration(object): + """Information about a rule or notification that can be configured at runtime.""" + + enabled: bool = dataclasses.field( + default=True, metadata={"schema_property_name": "enabled"} + ) + level: Literal["none", "note", "warning", "error"] = dataclasses.field( + default="warning", metadata={"schema_property_name": "level"} + ) + parameters: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "parameters"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + rank: float = dataclasses.field( + default=-1.0, metadata={"schema_property_name": "rank"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor.py new file mode 100644 index 0000000000000000000000000000000000000000..f562f2f81ba5174b67c24a78d811f5fdc5d069b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor.py @@ -0,0 +1,71 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
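Replacement requires only `deleted_region`. ArtifactContent and its `text` field are assumed from the SARIF schema, since _artifact_content.py is not part of this hunk:

from torch.onnx._internal.diagnostics.infra.sarif import (
    _artifact_content,
    _region,
    _replacement,
)

rep = _replacement.Replacement(
    deleted_region=_region.Region(start_line=3, end_line=3),
    # `text` on ArtifactContent is assumed, not shown in this hunk.
    inserted_content=_artifact_content.ArtifactContent(text="fixed_call()"),
)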
+ +from __future__ import annotations + +import dataclasses +from typing import Any, List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _multiformat_message_string, + _property_bag, + _reporting_configuration, + _reporting_descriptor_relationship, +) + + +@dataclasses.dataclass +class ReportingDescriptor(object): + """Metadata that describes a specific report produced by the tool, as part of the analysis it provides or its runtime reporting.""" + + id: str = dataclasses.field(metadata={"schema_property_name": "id"}) + default_configuration: Optional[ + _reporting_configuration.ReportingConfiguration + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "defaultConfiguration"} + ) + deprecated_guids: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "deprecatedGuids"} + ) + deprecated_ids: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "deprecatedIds"} + ) + deprecated_names: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "deprecatedNames"} + ) + full_description: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "fullDescription"} + ) + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + help: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field(default=None, metadata={"schema_property_name": "help"}) + help_uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "helpUri"} + ) + message_strings: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "messageStrings"} + ) + name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "name"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + relationships: Optional[ + List[_reporting_descriptor_relationship.ReportingDescriptorRelationship] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "relationships"} + ) + short_description: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "shortDescription"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor_reference.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..4d057d508446ea24fff5320219b69e0cdef51743 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor_reference.py @@ -0,0 +1,38 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
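Only `id` is required on ReportingDescriptor; a sketch of a hypothetical rule definition:

from torch.onnx._internal.diagnostics.infra.sarif import (
    _multiformat_message_string,
    _reporting_descriptor,
)

rule = _reporting_descriptor.ReportingDescriptor(
    id="POE0001",  # hypothetical rule id
    name="operator-export",
    short_description=_multiformat_message_string.MultiformatMessageString(
        text="Diagnostics emitted while exporting an operator."
    ),
)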
+ +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _property_bag, + _tool_component_reference, +) + + +@dataclasses.dataclass +class ReportingDescriptorReference(object): + """Information about how to locate a relevant reporting descriptor.""" + + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + id: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "id"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + tool_component: Optional[ + _tool_component_reference.ToolComponentReference + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "toolComponent"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor_relationship.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor_relationship.py new file mode 100644 index 0000000000000000000000000000000000000000..b66bd1bb4c0f359c038df836c8f4f55516e64a9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_reporting_descriptor_relationship.py @@ -0,0 +1,34 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _message, + _property_bag, + _reporting_descriptor_reference, +) + + +@dataclasses.dataclass +class ReportingDescriptorRelationship(object): + """Information about the relation of one reporting descriptor to another.""" + + target: _reporting_descriptor_reference.ReportingDescriptorReference = ( + dataclasses.field(metadata={"schema_property_name": "target"}) + ) + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + kinds: List[str] = dataclasses.field( + default_factory=lambda: ["relevant"], metadata={"schema_property_name": "kinds"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_result.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_result.py new file mode 100644 index 0000000000000000000000000000000000000000..829cd3cdf5dc954c4b3180c0c39605d6f83f02ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_result.py @@ -0,0 +1,128 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
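The reference/relationship pair above works the same way: ReportingDescriptorReference is all-optional, while ReportingDescriptorRelationship requires `target` and defaults `kinds` to ["relevant"]. A sketch:

from torch.onnx._internal.diagnostics.infra.sarif import (
    _reporting_descriptor_reference,
    _reporting_descriptor_relationship,
)

ref = _reporting_descriptor_reference.ReportingDescriptorReference(
    id="POE0001",  # hypothetical rule id; index stays at the -1 "not set" sentinel
)
rel = _reporting_descriptor_relationship.ReportingDescriptorRelationship(
    target=ref,  # required; kinds defaults to ["relevant"]
)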
+ +from __future__ import annotations + +import dataclasses +from typing import Any, List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _attachment, + _code_flow, + _fix, + _graph, + _graph_traversal, + _location, + _message, + _property_bag, + _reporting_descriptor_reference, + _result_provenance, + _stack, + _suppression, + _web_request, + _web_response, +) + + +@dataclasses.dataclass +class Result(object): + """A result produced by an analysis tool.""" + + message: _message.Message = dataclasses.field( + metadata={"schema_property_name": "message"} + ) + analysis_target: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "analysisTarget"} + ) + attachments: Optional[List[_attachment.Attachment]] = dataclasses.field( + default=None, metadata={"schema_property_name": "attachments"} + ) + baseline_state: Optional[ + Literal["new", "unchanged", "updated", "absent"] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "baselineState"} + ) + code_flows: Optional[List[_code_flow.CodeFlow]] = dataclasses.field( + default=None, metadata={"schema_property_name": "codeFlows"} + ) + correlation_guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "correlationGuid"} + ) + fingerprints: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "fingerprints"} + ) + fixes: Optional[List[_fix.Fix]] = dataclasses.field( + default=None, metadata={"schema_property_name": "fixes"} + ) + graph_traversals: Optional[ + List[_graph_traversal.GraphTraversal] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "graphTraversals"} + ) + graphs: Optional[List[_graph.Graph]] = dataclasses.field( + default=None, metadata={"schema_property_name": "graphs"} + ) + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + hosted_viewer_uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "hostedViewerUri"} + ) + kind: Literal[ + "notApplicable", "pass", "fail", "review", "open", "informational" + ] = dataclasses.field(default="fail", metadata={"schema_property_name": "kind"}) + level: Literal["none", "note", "warning", "error"] = dataclasses.field( + default="warning", metadata={"schema_property_name": "level"} + ) + locations: Optional[List[_location.Location]] = dataclasses.field( + default=None, metadata={"schema_property_name": "locations"} + ) + occurrence_count: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "occurrenceCount"} + ) + partial_fingerprints: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "partialFingerprints"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + provenance: Optional[_result_provenance.ResultProvenance] = dataclasses.field( + default=None, metadata={"schema_property_name": "provenance"} + ) + rank: float = dataclasses.field( + default=-1.0, metadata={"schema_property_name": "rank"} + ) + related_locations: Optional[List[_location.Location]] = dataclasses.field( + default=None, metadata={"schema_property_name": "relatedLocations"} + ) + rule: Optional[ + _reporting_descriptor_reference.ReportingDescriptorReference + ] = dataclasses.field(default=None, metadata={"schema_property_name": "rule"}) + rule_id: Optional[str] = 
dataclasses.field( + default=None, metadata={"schema_property_name": "ruleId"} + ) + rule_index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "ruleIndex"} + ) + stacks: Optional[List[_stack.Stack]] = dataclasses.field( + default=None, metadata={"schema_property_name": "stacks"} + ) + suppressions: Optional[List[_suppression.Suppression]] = dataclasses.field( + default=None, metadata={"schema_property_name": "suppressions"} + ) + taxa: Optional[ + List[_reporting_descriptor_reference.ReportingDescriptorReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "taxa"}) + web_request: Optional[_web_request.WebRequest] = dataclasses.field( + default=None, metadata={"schema_property_name": "webRequest"} + ) + web_response: Optional[_web_response.WebResponse] = dataclasses.field( + default=None, metadata={"schema_property_name": "webResponse"} + ) + work_item_uris: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "workItemUris"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_result_provenance.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_result_provenance.py new file mode 100644 index 0000000000000000000000000000000000000000..e542414a8da533445256edf5e1887945439305b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_result_provenance.py @@ -0,0 +1,44 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _physical_location, + _property_bag, +) + + +@dataclasses.dataclass +class ResultProvenance(object): + """Contains information about how and when a result was detected.""" + + conversion_sources: Optional[ + List[_physical_location.PhysicalLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "conversionSources"} + ) + first_detection_run_guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "firstDetectionRunGuid"} + ) + first_detection_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "firstDetectionTimeUtc"} + ) + invocation_index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "invocationIndex"} + ) + last_detection_run_guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "lastDetectionRunGuid"} + ) + last_detection_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "lastDetectionTimeUtc"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_run.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_run.py new file mode 100644 index 0000000000000000000000000000000000000000..e2aca9ba5e32e01e48ddcafefc4dd9a077b6bf94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_run.py @@ -0,0 +1,134 @@ +# DO NOT EDIT! 
This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Any, List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _address, + _artifact, + _conversion, + _external_property_file_references, + _graph, + _invocation, + _logical_location, + _property_bag, + _result, + _run_automation_details, + _special_locations, + _thread_flow_location, + _tool, + _tool_component, + _version_control_details, + _web_request, + _web_response, +) + + +@dataclasses.dataclass +class Run(object): + """Describes a single run of an analysis tool, and contains the reported output of that run.""" + + tool: _tool.Tool = dataclasses.field(metadata={"schema_property_name": "tool"}) + addresses: Optional[List[_address.Address]] = dataclasses.field( + default=None, metadata={"schema_property_name": "addresses"} + ) + artifacts: Optional[List[_artifact.Artifact]] = dataclasses.field( + default=None, metadata={"schema_property_name": "artifacts"} + ) + automation_details: Optional[ + _run_automation_details.RunAutomationDetails + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "automationDetails"} + ) + baseline_guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "baselineGuid"} + ) + column_kind: Optional[ + Literal["utf16CodeUnits", "unicodeCodePoints"] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "columnKind"}) + conversion: Optional[_conversion.Conversion] = dataclasses.field( + default=None, metadata={"schema_property_name": "conversion"} + ) + default_encoding: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "defaultEncoding"} + ) + default_source_language: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "defaultSourceLanguage"} + ) + external_property_file_references: Optional[ + _external_property_file_references.ExternalPropertyFileReferences + ] = dataclasses.field( + default=None, + metadata={"schema_property_name": "externalPropertyFileReferences"}, + ) + graphs: Optional[List[_graph.Graph]] = dataclasses.field( + default=None, metadata={"schema_property_name": "graphs"} + ) + invocations: Optional[List[_invocation.Invocation]] = dataclasses.field( + default=None, metadata={"schema_property_name": "invocations"} + ) + language: str = dataclasses.field( + default="en-US", metadata={"schema_property_name": "language"} + ) + logical_locations: Optional[ + List[_logical_location.LogicalLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "logicalLocations"} + ) + newline_sequences: List[str] = dataclasses.field( + default_factory=lambda: ["\r\n", "\n"], + metadata={"schema_property_name": "newlineSequences"}, + ) + original_uri_base_ids: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "originalUriBaseIds"} + ) + policies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "policies"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + redaction_tokens: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "redactionTokens"} + ) + results: Optional[List[_result.Result]] = dataclasses.field( + default=None, 
metadata={"schema_property_name": "results"} + ) + run_aggregates: Optional[ + List[_run_automation_details.RunAutomationDetails] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "runAggregates"} + ) + special_locations: Optional[ + _special_locations.SpecialLocations + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "specialLocations"} + ) + taxonomies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "taxonomies"} + ) + thread_flow_locations: Optional[ + List[_thread_flow_location.ThreadFlowLocation] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "threadFlowLocations"} + ) + translations: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "translations"} + ) + version_control_provenance: Optional[ + List[_version_control_details.VersionControlDetails] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "versionControlProvenance"} + ) + web_requests: Optional[List[_web_request.WebRequest]] = dataclasses.field( + default=None, metadata={"schema_property_name": "webRequests"} + ) + web_responses: Optional[List[_web_response.WebResponse]] = dataclasses.field( + default=None, metadata={"schema_property_name": "webResponses"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_run_automation_details.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_run_automation_details.py new file mode 100644 index 0000000000000000000000000000000000000000..ae63959240b34fd8d71d3eef87d40c240717431d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_run_automation_details.py @@ -0,0 +1,33 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag + + +@dataclasses.dataclass +class RunAutomationDetails(object): + """Information that describes a run's identity and role within an engineering system process.""" + + correlation_guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "correlationGuid"} + ) + description: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "description"} + ) + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + id: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "id"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_sarif_log.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_sarif_log.py new file mode 100644 index 0000000000000000000000000000000000000000..c738222981e59eb4fc528d0b74088bd3134024dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_sarif_log.py @@ -0,0 +1,37 @@ +# DO NOT EDIT! 
This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _external_properties, + _property_bag, + _run, +) + + +@dataclasses.dataclass +class SarifLog(object): + """Static Analysis Results Format (SARIF) Version 2.1.0 JSON Schema: a standard format for the output of static analysis tools.""" + + runs: List[_run.Run] = dataclasses.field(metadata={"schema_property_name": "runs"}) + version: Literal["2.1.0"] = dataclasses.field( + metadata={"schema_property_name": "version"} + ) + schema_uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "$schema"} + ) + inline_external_properties: Optional[ + List[_external_properties.ExternalProperties] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "inlineExternalProperties"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_special_locations.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_special_locations.py new file mode 100644 index 0000000000000000000000000000000000000000..77bd3ccff59d47f815ac215b7b2f8f08f3521a7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_special_locations.py @@ -0,0 +1,27 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _property_bag, +) + + +@dataclasses.dataclass +class SpecialLocations(object): + """Defines locations of special significance to SARIF consumers.""" + + display_base: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "displayBase"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_stack.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..cad11f5ec38c0e265953b0dafd8de86cbaaada34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_stack.py @@ -0,0 +1,31 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
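Pulling the pieces together: Result requires only `message`, Run only `tool`, and SarifLog the Literal-typed `version` plus `runs` (with `schema_uri` serialized as "$schema"). Tool and ToolComponent are assumed from the SARIF 2.1.0 schema here, since _tool.py is only opened at the end of this hunk and _tool_component.py is not shown. A minimal sketch:

from torch.onnx._internal.diagnostics.infra.sarif import (
    _message,
    _result,
    _run,
    _sarif_log,
    _tool,
    _tool_component,
)

res = _result.Result(
    message=_message.Message(text="Node exported with a fallback implementation."),
    rule_id="POE0001",  # hypothetical id
    level="note",  # must be "none", "note", "warning", or "error"
)
run = _run.Run(
    # Tool/ToolComponent fields are assumed from the SARIF schema.
    tool=_tool.Tool(driver=_tool_component.ToolComponent(name="torch.onnx")),
    results=[res],
)
log = _sarif_log.SarifLog(
    version="2.1.0",  # the only value the Literal admits
    runs=[run],
    schema_uri="https://json.schemastore.org/sarif-2.1.0.json",  # one commonly used schema URI
)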
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _message, + _property_bag, + _stack_frame, +) + + +@dataclasses.dataclass +class Stack(object): + """A call stack that is relevant to a result.""" + + frames: List[_stack_frame.StackFrame] = dataclasses.field( + metadata={"schema_property_name": "frames"} + ) + message: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_stack_frame.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_stack_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..509756f3374ab84c2277206cadc9d881d1fcc9ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_stack_frame.py @@ -0,0 +1,33 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _location, _property_bag + + +@dataclasses.dataclass +class StackFrame(object): + """A function call within a stack trace.""" + + location: Optional[_location.Location] = dataclasses.field( + default=None, metadata={"schema_property_name": "location"} + ) + module: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "module"} + ) + parameters: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "parameters"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + thread_id: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "threadId"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_suppression.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_suppression.py new file mode 100644 index 0000000000000000000000000000000000000000..c1dcb014809d994a4777917e5e1764388b48dff5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_suppression.py @@ -0,0 +1,36 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
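Stack requires `frames`, and every StackFrame property is optional; a sketch:

from torch.onnx._internal.diagnostics.infra.sarif import (
    _message,
    _stack,
    _stack_frame,
)

stack = _stack.Stack(
    frames=[_stack_frame.StackFrame(module="torch.onnx", thread_id=1)],
    message=_message.Message(text="export call stack"),
)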
+ +from __future__ import annotations + +import dataclasses +from typing import Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _location, _property_bag + + +@dataclasses.dataclass +class Suppression(object): + """A suppression that is relevant to a result.""" + + kind: Literal["inSource", "external"] = dataclasses.field( + metadata={"schema_property_name": "kind"} + ) + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + justification: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "justification"} + ) + location: Optional[_location.Location] = dataclasses.field( + default=None, metadata={"schema_property_name": "location"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + state: Optional[Literal["accepted", "underReview", "rejected"]] = dataclasses.field( + default=None, metadata={"schema_property_name": "state"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_thread_flow.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_thread_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..b107ee774c9241bb049f3068bf6dc500b21f1575 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_thread_flow.py @@ -0,0 +1,40 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Any, List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _message, + _property_bag, + _thread_flow_location, +) + + +@dataclasses.dataclass +class ThreadFlow(object): + """Describes a sequence of code locations that specify a path through a single thread of execution such as an operating system thread or fiber.""" + + locations: List[_thread_flow_location.ThreadFlowLocation] = dataclasses.field( + metadata={"schema_property_name": "locations"} + ) + id: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "id"} + ) + immutable_state: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "immutableState"} + ) + initial_state: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "initialState"} + ) + message: Optional[_message.Message] = dataclasses.field( + default=None, metadata={"schema_property_name": "message"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_thread_flow_location.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_thread_flow_location.py new file mode 100644 index 0000000000000000000000000000000000000000..43c67cf62ccf79a75d5227d0b2018bc29d416640 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_thread_flow_location.py @@ -0,0 +1,67 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation.
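Suppression above is one of the few generated classes with a required, non-defaulted field plus Literal-constrained values. A small usage sketch grounded in the fields shown:

    from torch.onnx._internal.diagnostics.infra.sarif import _suppression

    suppression = _suppression.Suppression(
        kind="inSource",  # required; must be "inSource" or "external"
        justification="Reviewed; the finding is a false positive.",
        state="accepted",  # optional; one of "accepted", "underReview", "rejected"
    )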
+ +from __future__ import annotations + +import dataclasses +from typing import Any, List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _location, + _property_bag, + _reporting_descriptor_reference, + _stack, + _web_request, + _web_response, +) + + +@dataclasses.dataclass +class ThreadFlowLocation(object): + """A location visited by an analysis tool while simulating or monitoring the execution of a program.""" + + execution_order: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "executionOrder"} + ) + execution_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "executionTimeUtc"} + ) + importance: Literal["important", "essential", "unimportant"] = dataclasses.field( + default="important", metadata={"schema_property_name": "importance"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + kinds: Optional[List[str]] = dataclasses.field( + default=None, metadata={"schema_property_name": "kinds"} + ) + location: Optional[_location.Location] = dataclasses.field( + default=None, metadata={"schema_property_name": "location"} + ) + module: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "module"} + ) + nesting_level: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "nestingLevel"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + stack: Optional[_stack.Stack] = dataclasses.field( + default=None, metadata={"schema_property_name": "stack"} + ) + state: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "state"} + ) + taxa: Optional[ + List[_reporting_descriptor_reference.ReportingDescriptorReference] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "taxa"}) + web_request: Optional[_web_request.WebRequest] = dataclasses.field( + default=None, metadata={"schema_property_name": "webRequest"} + ) + web_response: Optional[_web_response.WebResponse] = dataclasses.field( + default=None, metadata={"schema_property_name": "webResponse"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..a6cfa87b05d7599cdc4cefe203180b38dc39c1dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool.py @@ -0,0 +1,27 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
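ThreadFlowLocation above illustrates the schema's "-1 means absent" convention for integer indices and an enum-like string default for `importance`. A usage sketch using only fields visible in this diff:

    from torch.onnx._internal.diagnostics.infra.sarif import _thread_flow_location

    step = _thread_flow_location.ThreadFlowLocation(
        execution_order=1,       # defaults to -1, i.e. unspecified
        importance="essential",  # defaults to "important"
        module="my_package.my_module",
    )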
+ +from __future__ import annotations + +import dataclasses +from typing import List, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag, _tool_component + + +@dataclasses.dataclass +class Tool(object): + """The analysis tool that was run.""" + + driver: _tool_component.ToolComponent = dataclasses.field( + metadata={"schema_property_name": "driver"} + ) + extensions: Optional[List[_tool_component.ToolComponent]] = dataclasses.field( + default=None, metadata={"schema_property_name": "extensions"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component.py new file mode 100644 index 0000000000000000000000000000000000000000..2421393b8ac30097c1d1078d1456a54e5a1dcd76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component.py @@ -0,0 +1,123 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Any, List, Literal, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _multiformat_message_string, + _property_bag, + _reporting_descriptor, + _tool_component_reference, + _translation_metadata, +) + + +@dataclasses.dataclass +class ToolComponent(object): + """A component, such as a plug-in or the driver, of the analysis tool that was run.""" + + name: str = dataclasses.field(metadata={"schema_property_name": "name"}) + associated_component: Optional[ + _tool_component_reference.ToolComponentReference + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "associatedComponent"} + ) + contents: List[Literal["localizedData", "nonLocalizedData"]] = dataclasses.field( + default_factory=lambda: ["localizedData", "nonLocalizedData"], + metadata={"schema_property_name": "contents"}, + ) + dotted_quad_file_version: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "dottedQuadFileVersion"} + ) + download_uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "downloadUri"} + ) + full_description: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "fullDescription"} + ) + full_name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "fullName"} + ) + global_message_strings: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "globalMessageStrings"} + ) + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + information_uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "informationUri"} + ) + is_comprehensive: Optional[bool] = dataclasses.field( + default=None, metadata={"schema_property_name": "isComprehensive"} + ) + language: str = dataclasses.field( + default="en-US", metadata={"schema_property_name": "language"} + ) + localized_data_semantic_version: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "localizedDataSemanticVersion"} + ) + locations: 
Optional[List[_artifact_location.ArtifactLocation]] = dataclasses.field( + default=None, metadata={"schema_property_name": "locations"} + ) + minimum_required_localized_data_semantic_version: Optional[str] = dataclasses.field( + default=None, + metadata={ + "schema_property_name": "minimumRequiredLocalizedDataSemanticVersion" + }, + ) + notifications: Optional[ + List[_reporting_descriptor.ReportingDescriptor] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "notifications"} + ) + organization: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "organization"} + ) + product: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "product"} + ) + product_suite: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "productSuite"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + release_date_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "releaseDateUtc"} + ) + rules: Optional[ + List[_reporting_descriptor.ReportingDescriptor] + ] = dataclasses.field(default=None, metadata={"schema_property_name": "rules"}) + semantic_version: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "semanticVersion"} + ) + short_description: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "shortDescription"} + ) + supported_taxonomies: Optional[ + List[_tool_component_reference.ToolComponentReference] + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "supportedTaxonomies"} + ) + taxa: Optional[List[_reporting_descriptor.ReportingDescriptor]] = dataclasses.field( + default=None, metadata={"schema_property_name": "taxa"} + ) + translation_metadata: Optional[ + _translation_metadata.TranslationMetadata + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "translationMetadata"} + ) + version: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "version"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component_reference.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..c7929e12bc80de9d1f6db7d7810ff22ba5f51802 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_tool_component_reference.py @@ -0,0 +1,30 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
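ToolComponent above is the largest of the generated classes; only `name` lacks a default, while `contents` and `language` carry schema-level defaults. A usage sketch restricted to fields shown in this diff (the URI value is illustrative):

    from torch.onnx._internal.diagnostics.infra.sarif import _tool_component

    driver = _tool_component.ToolComponent(
        name="torch.onnx.dynamo_export",  # the only required field
        version="2.1.0",
        information_uri="https://pytorch.org/docs/stable/onnx.html",
        organization="PyTorch",
    )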
+ +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import _property_bag + + +@dataclasses.dataclass +class ToolComponentReference(object): + """Identifies a particular toolComponent object, either the driver or an extension.""" + + guid: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "guid"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "name"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_translation_metadata.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_translation_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..cba1f5570d3d53d7b92b52e2ed7cc2d9d54af8ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_translation_metadata.py @@ -0,0 +1,44 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. + +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _multiformat_message_string, + _property_bag, +) + + +@dataclasses.dataclass +class TranslationMetadata(object): + """Provides additional metadata related to translation.""" + + name: str = dataclasses.field(metadata={"schema_property_name": "name"}) + download_uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "downloadUri"} + ) + full_description: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "fullDescription"} + ) + full_name: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "fullName"} + ) + information_uri: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "informationUri"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + short_description: Optional[ + _multiformat_message_string.MultiformatMessageString + ] = dataclasses.field( + default=None, metadata={"schema_property_name": "shortDescription"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_version_control_details.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_version_control_details.py new file mode 100644 index 0000000000000000000000000000000000000000..870222e7a5ddeee87945c570d70899461375bf9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_version_control_details.py @@ -0,0 +1,42 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
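Every field above carries a `schema_property_name` metadata entry mapping the snake_case attribute back to the camelCase (or `$`-prefixed) JSON property name. A hedged sketch of how such metadata could drive serialization; this illustrates why the metadata exists and is not the library's own encoder:

    import dataclasses
    from typing import Any

    def to_sarif_dict(obj: Any) -> Any:
        """Recursively convert a generated SARIF dataclass into a dict keyed
        by each field's schema_property_name metadata."""
        if dataclasses.is_dataclass(obj):
            result = {}
            for field in dataclasses.fields(obj):
                value = getattr(obj, field.name)
                if value is None:
                    continue  # omit absent optional properties
                key = field.metadata.get("schema_property_name", field.name)
                result[key] = to_sarif_dict(value)
            return result
        if isinstance(obj, (list, tuple)):
            return [to_sarif_dict(item) for item in obj]
        return obj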
+ +from __future__ import annotations + +import dataclasses +from typing import Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_location, + _property_bag, +) + + +@dataclasses.dataclass +class VersionControlDetails(object): + """Specifies the information necessary to retrieve a desired revision from a version control system.""" + + repository_uri: str = dataclasses.field( + metadata={"schema_property_name": "repositoryUri"} + ) + as_of_time_utc: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "asOfTimeUtc"} + ) + branch: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "branch"} + ) + mapped_to: Optional[_artifact_location.ArtifactLocation] = dataclasses.field( + default=None, metadata={"schema_property_name": "mappedTo"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + revision_id: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "revisionId"} + ) + revision_tag: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "revisionTag"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_web_request.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_web_request.py new file mode 100644 index 0000000000000000000000000000000000000000..b8d4e34756a48e5f0cf81b80d6c1b8d18871ab09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_web_request.py @@ -0,0 +1,48 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
+ +from __future__ import annotations + +import dataclasses +from typing import Any, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_content, + _property_bag, +) + + +@dataclasses.dataclass +class WebRequest(object): + """Describes an HTTP request.""" + + body: Optional[_artifact_content.ArtifactContent] = dataclasses.field( + default=None, metadata={"schema_property_name": "body"} + ) + headers: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "headers"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + method: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "method"} + ) + parameters: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "parameters"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + protocol: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "protocol"} + ) + target: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "target"} + ) + version: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "version"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_web_response.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_web_response.py new file mode 100644 index 0000000000000000000000000000000000000000..1e86deedb3c6319448868a2a355d4f0721912635 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/_web_response.py @@ -0,0 +1,48 @@ +# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29, +# with extension for dataclasses and type annotation. 
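WebRequest above (and WebResponse, next) describe HTTP traffic observed by an analysis tool; every field is optional apart from the `-1` index default. A small sketch using only fields shown:

    from torch.onnx._internal.diagnostics.infra.sarif import _web_request

    request = _web_request.WebRequest(
        method="GET",
        target="/example",
        protocol="HTTP",
        version="1.1",
    )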
+ +from __future__ import annotations + +import dataclasses +from typing import Any, Optional + +from torch.onnx._internal.diagnostics.infra.sarif import ( + _artifact_content, + _property_bag, +) + + +@dataclasses.dataclass +class WebResponse(object): + """Describes the response to an HTTP request.""" + + body: Optional[_artifact_content.ArtifactContent] = dataclasses.field( + default=None, metadata={"schema_property_name": "body"} + ) + headers: Any = dataclasses.field( + default=None, metadata={"schema_property_name": "headers"} + ) + index: int = dataclasses.field( + default=-1, metadata={"schema_property_name": "index"} + ) + no_response_received: Optional[bool] = dataclasses.field( + default=None, metadata={"schema_property_name": "noResponseReceived"} + ) + properties: Optional[_property_bag.PropertyBag] = dataclasses.field( + default=None, metadata={"schema_property_name": "properties"} + ) + protocol: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "protocol"} + ) + reason_phrase: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "reasonPhrase"} + ) + status_code: Optional[int] = dataclasses.field( + default=None, metadata={"schema_property_name": "statusCode"} + ) + version: Optional[str] = dataclasses.field( + default=None, metadata={"schema_property_name": "version"} + ) + + +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/version.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/version.py new file mode 100644 index 0000000000000000000000000000000000000000..2beddcb3f0427b8cd197b45f0b2f8b0355dfb419 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/sarif/version.py @@ -0,0 +1,5 @@ +from typing import Final + +SARIF_VERSION: Final = "2.1.0" +SARIF_SCHEMA_LINK: Final = "https://docs.oasis-open.org/sarif/sarif/v2.1.0/cs01/schemas/sarif-schema-2.1.0.json" +# flake8: noqa diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/utils.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4648b477515025d8fea7df71a7c732c2ed4fb764 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/infra/utils.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +import functools + +import inspect +import traceback +from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple + +from torch.onnx._internal import _beartype +from torch.onnx._internal.diagnostics.infra import _infra, formatter + + +@_beartype.beartype +def python_frame(frame: traceback.FrameSummary) -> _infra.StackFrame: + """Returns a StackFrame for the given traceback.FrameSummary.""" + snippet = frame.line + + return _infra.StackFrame( + location=_infra.Location( + uri=frame.filename, + line=frame.lineno, + snippet=snippet, + function=frame.name, + message=snippet, + ) + ) + + +@_beartype.beartype +def python_call_stack(frames_to_skip: int = 0, frames_to_log: int = 16) -> _infra.Stack: + """Returns the current Python call stack.""" + if frames_to_skip < 0: + raise ValueError("frames_to_skip must be non-negative") + if frames_to_log < 0: + raise ValueError("frames_to_log must be non-negative") + frames_to_skip += 2 # Skip this function and beartype. + stack = _infra.Stack() + # Frames are returned in order of oldest to newest. 
+ frames = traceback.extract_stack(limit=frames_to_skip + frames_to_log) + frames.reverse() + stack.frames = [python_frame(frame) for frame in frames[frames_to_skip:]] + stack.message = "Python call stack" + return stack + + +@functools.lru_cache +def _function_source_info(fn: Callable) -> Tuple[Sequence[str], int, Optional[str]]: + """Returns the source lines, line number, and source file path for the given function. + + Essentially, inspect.getsourcelines() and inspect.getsourcefile() combined. + Caching is applied to reduce the performance impact of this function. + """ + source_lines, lineno = inspect.getsourcelines(fn) + return source_lines, lineno, inspect.getsourcefile(fn) + + +@_beartype.beartype +def function_location(fn: Callable) -> _infra.Location: + """Returns a Location for the given function.""" + source_lines, lineno, uri = _function_source_info(fn) + snippet = source_lines[0].strip() if len(source_lines) > 0 else "" + return _infra.Location( + uri=uri, + line=lineno, + snippet=snippet, + message=formatter.display_name(fn), + ) + + +@_beartype.beartype +def function_state( + fn: Callable, args: Tuple[Any, ...], kwargs: Dict[str, Any] +) -> Mapping[str, Any]: + bind = inspect.signature(fn).bind(*args, **kwargs) + return bind.arguments diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/exporter.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/exporter.py new file mode 100644 index 0000000000000000000000000000000000000000..9cc2b1a23f12b7b4f144abfc4c33899f41e9e4d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/exporter.py @@ -0,0 +1,1534 @@ +from __future__ import ( # for onnx.ModelProto (ONNXProgram) and onnxruntime (ONNXRuntimeOptions) + annotations, +) + +import abc + +import contextlib +import dataclasses +import io +import logging +import os + +import warnings +from collections import defaultdict +from typing import ( + Any, + Callable, + Dict, + Final, + List, + Mapping, + Optional, + Protocol, + runtime_checkable, + Sequence, + Set, + Tuple, + TYPE_CHECKING, + TypeVar, + Union, +) + +from typing_extensions import Self + +import torch + +import torch._ops +import torch.export as torch_export +import torch.utils._pytree as pytree +from torch._subclasses import fake_tensor + +from torch.onnx._internal import _beartype, io_adapter +from torch.onnx._internal.diagnostics import infra +from torch.onnx._internal.fx import ( + decomposition_table, + patcher as patcher, + registration, + serialization as fx_serialization, +) + +# We can only import onnx from this module in a type-checking context to ensure that +# 'import torch.onnx' continues to work without having 'onnx' installed. We fully +# 'import onnx' inside of dynamo_export (by way of _assert_dependencies). +if TYPE_CHECKING: + import onnx + import onnxruntime # type: ignore[import] + import onnxscript # type: ignore[import] + from onnxscript.function_libs.torch_lib import ( # type: ignore[import] + registration as torchlib_registry, + ) + + from torch.onnx._internal.fx import diagnostics +else: + try: + # beartype needs this import due to runtime type checking. + # This cannot be normally imported at top level due to + # https://github.com/pytorch/pytorch/issues/103764 + from torch.onnx._internal.fx import diagnostics + except ImportError: + # The error will be handled elsewhere when the exporter is used. 
+ pass + +_DEFAULT_OPSET_VERSION: Final[int] = 18 +"""The default ONNX opset version the exporter will use if one is not specified explicitly +through :class:`ExportOptions`. This should NEVER be accessed outside of this module! Users +should reference :attr:`ExportOptions.opset_version`.""" + +_PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues" +"""The URL to the PyTorch GitHub issues page.""" + +_DEFAULT_FAILED_EXPORT_SARIF_LOG_PATH = "report_dynamo_export.sarif" +"""The default path to write the SARIF log to if the export fails.""" + +_PROTOBUF_SIZE_MAX_LIMIT = 2 * 1024 * 1024 * 1024 +"""The maximum size of a Protobuf file in bytes. This is used to determine whether to +serialize the model with external data or not.""" + +log = logging.getLogger(__name__) + + +DiagnosticOptions = infra.DiagnosticOptions + + +@dataclasses.dataclass +class ONNXFakeContext: + """A dataclass used to store context for model export using FakeTensor. + + This dataclass stores the FakeTensorMode instance used to convert + real tensors and model parameters into fake tensors. This :attr:`ONNXFakeContext.fake_mode` is + reused internally during tracing of a :class:`torch.nn.Module` into an FX :class:`GraphModule`. + """ + + fake_mode: fake_tensor.FakeTensorMode + """The fake tensor mode used for tracing the model using fake tensors and parameters.""" + + state_dict_paths: Optional[Tuple[Union[str, io.BytesIO, Dict[str, Any]]]] = None + """List of paths of files that contain the model :meth:`state_dict`.""" + + +class OnnxRegistry: + """Registry for ONNX functions. + + The registry maintains a mapping from qualified names to symbolic functions under a + fixed opset version. It supports registering custom onnx-script functions and allows + the dispatcher to dispatch calls to the appropriate function. + + """ + + def __init__(self) -> None: + """Initializes the registry.""" + + # NOTE: _registry is the registry that maps an OpName to a list of ONNXFunctions. It is important + # not to directly modify this variable. Instead, access to it should be done through + # the public methods: register_op, get_op_functions, and is_registered_op. + self._registry: Dict[ + registration.OpName, List[registration.ONNXFunction] + ] = defaultdict(list) + # FIXME: Avoid importing onnxscript into torch + from onnxscript.function_libs.torch_lib import ( # type: ignore[import] # noqa: F401 + registration, + ) + + # opset_version is unused for now, since torchlib only supports opset18. + # TODO: get opset version from torchlib + self._opset_version = _DEFAULT_OPSET_VERSION + warnings.warn( + f"torch.onnx.dynamo_export only implements opset version {self._opset_version} for now. If you need to use a " + "different opset version, please register them with register_op." + ) + + # Initialize registry from torchlib + self._initiate_registry_from_torchlib(registration.default_registry) + + @property + def opset_version(self) -> int: + """The ONNX opset version the exporter should target. Defaults to the latest + supported ONNX opset version: 18. The default version will increment over time as + ONNX continues to evolve.""" + + return self._opset_version + + def _initiate_registry_from_torchlib( + self, torchlib_registry: torchlib_registry.Registry + ): + """Populates the registry with ATen functions from torchlib. + + Args: + torchlib_registry: The torchlib registry to use for populating the registry.
+ """ + for aten_name, aten_overloads_func in torchlib_registry.items(): + internal_name_instance = registration.OpName.from_qualified_name(aten_name) + for overload_func in aten_overloads_func.overloads: + symbolic_function = registration.ONNXFunction( + onnx_function=overload_func, + op_full_name=internal_name_instance.qualified_name(), + is_custom=False, + is_complex=False, + ) + self._register(internal_name_instance, symbolic_function) + + for complex_func in aten_overloads_func.complex: + symbolic_function = registration.ONNXFunction( + onnx_function=complex_func, + op_full_name=internal_name_instance.qualified_name(), + is_custom=False, + is_complex=True, + ) + self._register(internal_name_instance, symbolic_function) + + @_beartype.beartype + def _register( + self, + internal_qualified_name: registration.OpName, + symbolic_function: registration.ONNXFunction, + ) -> None: + """Registers a ONNXFunction to an operator. + + Args: + internal_qualified_name: The qualified name of the operator to register: OpName. + symbolic_function: The ONNXFunction to register. + """ + self._registry[internal_qualified_name].append(symbolic_function) + + @_beartype.beartype + def register_op( + self, + function: Union["onnxscript.OnnxFunction", "onnxscript.TracedOnnxFunction"], + namespace: str, + op_name: str, + overload: Optional[str] = None, + is_complex: bool = False, + ) -> None: + """Registers a custom operator: torch.ops.... + + Args: + function: The onnx-sctip function to register. + namespace: The namespace of the operator to register. + op_name: The name of the operator to register. + overload: The overload of the operator to register. If it's default overload, + leave it to None. + is_complex: Whether the function is a function that handles complex valued inputs. + + Raises: + ValueError: If the name is not in the form of 'namespace::op'. + """ + internal_name_instance = registration.OpName.from_name_parts( + namespace=namespace, op_name=op_name, overload=overload + ) + symbolic_function = registration.ONNXFunction( + onnx_function=function, + op_full_name=internal_name_instance.qualified_name(), + is_custom=True, + is_complex=is_complex, + ) + self._register(internal_name_instance, symbolic_function) + + @_beartype.beartype + def get_op_functions( + self, namespace: str, op_name: str, overload: Optional[str] = None + ) -> Optional[List[registration.ONNXFunction]]: + """Returns a list of ONNXFunctions for the given op: torch.ops.... + + The list is ordered by the time of registration. The custom operators should be + in the second half of the list. + + Args: + namespace: The namespace of the operator to get. + op_name: The name of the operator to get. + overload: The overload of the operator to get. If it's default overload, + leave it to None. + Returns: + A list of ONNXFunctions corresponding to the given name, or None if + the name is not in the registry. + """ + internal_name_instance = registration.OpName.from_name_parts( + namespace=namespace, op_name=op_name, overload=overload + ) + return self._registry.get(internal_name_instance) + + @_beartype.beartype + def is_registered_op( + self, namespace: str, op_name: str, overload: Optional[str] = None + ) -> bool: + """Returns whether the given op is registered: torch.ops.... + + Args: + namespace: The namespace of the operator to check. + op_name: The name of the operator to check. + overload: The overload of the operator to check. If it's default overload, + leave it to None. 
+ + Returns: + True if the given op is registered, otherwise False. + """ + functions = self.get_op_functions( + namespace=namespace, op_name=op_name, overload=overload + ) + return functions is not None + + @_beartype.beartype + def _all_registered_ops(self) -> Set[str]: + """Returns the set of all registered function names.""" + return { + op_name_class.qualified_name() for op_name_class in self._registry.keys() + } + + +class ExportOptions: + """Options to influence the TorchDynamo ONNX exporter. + + Attributes: + dynamic_shapes: Shape information hint for input/output tensors. + When ``None``, the exporter determines the most compatible setting. + When ``True``, all input shapes are considered dynamic. + When ``False``, all input shapes are considered static. + op_level_debug: Whether to export the model with op-level debug information. + diagnostic_options: The diagnostic options for the exporter. + fake_context: The fake context used for symbolic tracing. + onnx_registry: The ONNX registry used to register ATen operators to ONNX functions. + """ + + dynamic_shapes: Optional[bool] = None + """Shape information hint for input/output tensors. + + - ``None``: the exporter determines the most compatible setting. + - ``True``: all input shapes are considered dynamic. + - ``False``: all input shapes are considered static. + """ + + op_level_debug: Optional[bool] = None + """When ``True``, export the model with op-level debug by running ops through ONNX Runtime.""" + + diagnostic_options: DiagnosticOptions + """The diagnostic options for the exporter.""" + + fake_context: Optional[ONNXFakeContext] = None + """The fake context used for symbolic tracing.""" + + onnx_registry: Optional[OnnxRegistry] = None + """The ONNX registry used to register ATen operators to ONNX functions.""" + + @_beartype.beartype + def __init__( + self, + *, + dynamic_shapes: Optional[bool] = None, + op_level_debug: Optional[bool] = None, + fake_context: Optional[ONNXFakeContext] = None, + onnx_registry: Optional[OnnxRegistry] = None, + diagnostic_options: Optional[DiagnosticOptions] = None, + ): + self.dynamic_shapes = dynamic_shapes + self.op_level_debug = op_level_debug + self.fake_context = fake_context + self.onnx_registry = onnx_registry + self.diagnostic_options = diagnostic_options or DiagnosticOptions() + + +class ResolvedExportOptions(ExportOptions): + """Consolidates :class:`ExportOptions` with default values. + All unspecified options from :class:`ExportOptions` are assigned a default value. + This is an internal class and its API may be changed at any time without notice. + """ + + # Public attributes MUST be redefined below without ``Optional[]`` from ``ExportOptions`` + dynamic_shapes: bool + op_level_debug: bool + diagnostic_options: DiagnosticOptions + fake_context: ONNXFakeContext + onnx_registry: OnnxRegistry + + # Private-only attributes + decomposition_table: Dict[torch._ops.OpOverload, Callable] + """A dictionary that maps operators to their decomposition functions.""" + + onnxfunction_dispatcher: torch.onnx._internal.fx.onnxfunction_dispatcher.OnnxFunctionDispatcher + """The ONNX dispatcher used to dispatch ATen operators to ONNX functions.""" + + fx_tracer: FXGraphExtractor + """The FXGraphExtractor instance used to extract the FX graph from the model.""" + + diagnostic_context: diagnostics.DiagnosticContext + """The diagnostics context for the export.
Responsible for recording diagnostics, + logging diagnostics, and generating the SARIF log.""" + + @_beartype.beartype + def __init__( + self, + options: Union[ExportOptions, "ResolvedExportOptions"], + model: Optional[Union[torch.nn.Module, Callable, torch_export.ExportedProgram]] = None, # type: ignore[name-defined] + ): + from torch.onnx._internal.fx import ( # TODO: Prevent circular dep + diagnostics, + dynamo_graph_extractor, + torch_export_graph_extractor, + ) + + if isinstance(options, ResolvedExportOptions): + self.dynamic_shapes = options.dynamic_shapes + self.op_level_debug = options.op_level_debug + self.diagnostic_options = options.diagnostic_options + self.fake_context = options.fake_context + # private + if isinstance(model, torch_export.ExportedProgram) and not isinstance( + options.fx_tracer, torch_export_graph_extractor.TorchExport + ): + message = "'model' of type 'ExportedProgram' is only supported with 'TorchExport' FX Tracer" + e = InvalidExportOptionsError(message) + raise InvalidExportOptionsError( + ONNXProgram._from_failure(e, options.diagnostic_context), message + ) + self.fx_tracer = options.fx_tracer + self.onnx_registry = options.onnx_registry + self.onnxfunction_dispatcher = options.onnxfunction_dispatcher + self.decomposition_table = options.decomposition_table + self.diagnostic_context = options.diagnostic_context + else: + T = TypeVar("T") + + @_beartype.beartype + def resolve(value: Optional[T], fallback: Union[T, Callable[[], T]]) -> T: + if value is not None: + return value + if callable(fallback): + return fallback() + return fallback + + self.dynamic_shapes = resolve(options.dynamic_shapes, False) + + self.diagnostic_options = resolve( + options.diagnostic_options, DiagnosticOptions() + ) + if isinstance(model, torch_export.ExportedProgram): + self.fx_tracer = torch_export_graph_extractor.TorchExport() + else: + self.fx_tracer = dynamo_graph_extractor.DynamoExport() + + self.fake_context = resolve(options.fake_context, None) + self.diagnostic_context = diagnostics.DiagnosticContext( + "torch.onnx.dynamo_export", + torch.__version__, + self.diagnostic_options, + ) + + self.onnx_registry = resolve(options.onnx_registry, OnnxRegistry()) + self.decomposition_table = ( + decomposition_table.create_onnx_friendly_decomposition_table( + self.onnx_registry + ) + ) + + from torch.onnx._internal.fx import onnxfunction_dispatcher + + self.op_level_debug = resolve(options.op_level_debug, False) + self.onnxfunction_dispatcher = ( + onnxfunction_dispatcher.OnnxFunctionDispatcher( + self.onnx_registry, + self.diagnostic_context, + ) + ) + + for key in dir(options): + if not key.startswith("_"): # skip private attributes + assert hasattr(self, key), f"Unresolved option '{key}'" + + +@contextlib.contextmanager +def enable_fake_mode(): + """Enable fake mode for the duration of the context. + + Internally it instantiates a :class:`torch._subclasses.fake_tensor.FakeTensorMode` context manager + that converts user input and model parameters into :class:`torch._subclasses.fake_tensor.FakeTensor`. + + A :class:`torch._subclasses.fake_tensor.FakeTensor` + is a :class:`torch.Tensor` with the ability to run PyTorch code without having to + actually do computation through tensors allocated on a ``meta`` device. Because + there is no actual data being allocated on the device, this API allows for + exporting large models without the actual memory footprint needed for executing it. 
+ + It is highly recommended to enable fake mode when exporting models that + are too large to fit into memory. + + Returns: + A :class:`ONNXFakeContext` object that must be passed to :func:`dynamo_export` + through the :attr:`ExportOptions.fake_context` argument. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> import torch.onnx + >>> class MyModel(torch.nn.Module): # Dummy model + ... def __init__(self) -> None: + ... super().__init__() + ... self.linear = torch.nn.Linear(2, 2) + ... def forward(self, x): + ... out = self.linear(x) + ... return out + >>> with torch.onnx.enable_fake_mode() as fake_context: + ... my_nn_module = MyModel() + ... arg1 = torch.randn(2, 2, 2) # positional input 1 + >>> export_options = torch.onnx.ExportOptions(fake_context=fake_context) + >>> onnx_program = torch.onnx.dynamo_export( + ... my_nn_module, + ... arg1, + ... export_options=export_options + ... ) + >>> # Saving model WITHOUT initializers + >>> onnx_program.save("my_model_without_initializers.onnx") + >>> # Saving model WITH initializers + >>> onnx_program.save("my_model_with_initializers.onnx", model_state=MyModel().state_dict()) + + .. warning:: + This API is experimental and is *NOT* backward-compatible. + + """ + from torch._subclasses import fake_tensor + from torch.fx.experimental.symbolic_shapes import ShapeEnv + + # This overrides the internal `FakeTensorMode` instance created by `torch._dynamo.export`[1]. + # It is a good idea to keep them in sync (constructor args) to maintain the same default behavior + # [1] `torch/_dynamo/output_graph.py::InstructionTranslator::OutputGraph.__init__` + # Mixed fake/real tensors are only allowed when `torch.onnx.dynamo_export` is not called within `FakeTensorMode` + # This is needed because models can create new parameters during `forward(self, *args, **kwargs)` run + fake_mode = fake_tensor.FakeTensorMode( + allow_non_fake_inputs=not torch._guards.detect_fake_mode(), + shape_env=ShapeEnv( + allow_scalar_outputs=False, allow_dynamic_output_shape_ops=False + ), + ) + # The patcher is needed for when user calls `fake_model.load_state_dict(...)` within fake mode + patcher_context = patcher.ONNXTorchPatcher() + fake_context = ONNXFakeContext(fake_mode=fake_mode) + with fake_mode, patcher_context: + yield fake_context + fake_context.state_dict_paths = tuple( + patcher_context.paths, + ) # type: ignore[assignment] + + +@runtime_checkable +class ONNXProgramSerializer(Protocol): + """Protocol for serializing an ONNX graph into a specific format (e.g. Protobuf). + Note that this is an advanced usage scenario.""" + + def serialize( + self, onnx_program: ONNXProgram, destination: io.BufferedIOBase + ) -> None: + """Protocol method that must be implemented for serialization. + + Args: + onnx_program: Represents the in-memory exported ONNX model + destination: A binary IO stream or pre-allocated buffer into which + the serialized model should be written. + + Example: + + A simple serializer that writes the exported :py:obj:`onnx.ModelProto` in Protobuf + format to ``destination``: + + :: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import io + >>> import torch + >>> import torch.onnx + >>> class MyModel(torch.nn.Module): # Dummy model + ... def __init__(self) -> None: + ... super().__init__() + ... self.linear = torch.nn.Linear(2, 2) + ... def forward(self, x): + ... out = self.linear(x) + ... return out + >>> class ProtobufONNXProgramSerializer: + ... def serialize( + ... 
self, onnx_program: torch.onnx.ONNXProgram, destination: io.BufferedIOBase + ... ) -> None: + ... destination.write(onnx_program.model_proto.SerializeToString()) + >>> model = MyModel() + >>> arg1 = torch.randn(2, 2, 2) # positional input 1 + >>> torch.onnx.dynamo_export(model, arg1).save( + ... destination="exported_model.onnx", + ... serializer=ProtobufONNXProgramSerializer(), + ... ) + """ + ... + + +class ProtobufONNXProgramSerializer: + """Serializes ONNX graph as Protobuf.""" + + @_beartype.beartype + def serialize( + self, onnx_program: ONNXProgram, destination: io.BufferedIOBase + ) -> None: + import onnx + + if not isinstance(onnx_program.model_proto, onnx.ModelProto): # type: ignore[attr-defined] + raise ValueError("onnx_program.model_proto is not an onnx.ModelProto") + destination.write(onnx_program.model_proto.SerializeToString()) + + +class LargeProtobufONNXProgramSerializer: + """Serializes ONNX graph as Protobuf. + + Falls back to serializing as Protobuf with external data for models larger than 2GB. + """ + + _destination_path: Final[str] + + def __init__(self, destination_path: str): + self._destination_path = destination_path + + @_beartype.beartype + def serialize( + self, onnx_program: ONNXProgram, destination: io.BufferedIOBase + ) -> None: + """`destination` is ignored. The model is saved to `self._destination_path` instead.""" + import onnx + + if onnx_program.model_proto.ByteSize() < _PROTOBUF_SIZE_MAX_LIMIT: + onnx.save_model(onnx_program.model_proto, self._destination_path) # type: ignore[attr-defined] + else: + # ValueError: Message onnx.ModelProto exceeds maximum protobuf size of 2GB + # Fall back to serializing the model with external data. + onnx.save_model( # type: ignore[attr-defined] + onnx_program.model_proto, + self._destination_path, + save_as_external_data=True, + all_tensors_to_one_file=True, + ) + + +class ONNXRuntimeOptions: + """Options to influence the execution of the ONNX model through ONNX Runtime. + + Attributes: + session_options: ONNX Runtime session options. + execution_providers: ONNX Runtime execution providers to use during model execution. + execution_provider_options: ONNX Runtime execution provider options. + """ + + session_options: Optional[Sequence["onnxruntime.SessionOptions"]] = None + """ONNX Runtime session options.""" + + execution_providers: Optional[ + Sequence[Union[str, Tuple[str, Dict[Any, Any]]]] + ] = None + """ONNX Runtime execution providers to use during model execution.""" + + execution_provider_options: Optional[Sequence[Dict[Any, Any]]] = None + """ONNX Runtime execution provider options.""" + + @_beartype.beartype + def __init__( + self, + *, + session_options: Optional[Sequence["onnxruntime.SessionOptions"]] = None, + execution_providers: Optional[ + Sequence[Union[str, Tuple[str, Dict[Any, Any]]]] + ] = None, + execution_provider_options: Optional[Sequence[Dict[Any, Any]]] = None, + ): + self.session_options = session_options + self.execution_providers = execution_providers + self.execution_provider_options = execution_provider_options + + +class ONNXProgram: + """An in-memory representation of a PyTorch model that has been exported to ONNX. + + Args: + model_proto: The exported ONNX model as an :py:obj:`onnx.ModelProto`. + input_adapter: The input adapter used to convert PyTorch inputs into ONNX inputs. + output_adapter: The output adapter used to convert PyTorch outputs into ONNX outputs. + diagnostic_context: Context object for the SARIF diagnostic system responsible for logging errors and metadata.
+ fake_context: The fake context used for symbolic tracing. + export_exception: The exception that occurred during export, if any. + model_signature: The model signature for the exported ONNX graph. + """ + + _model_proto: Final[onnx.ModelProto] # type: ignore[name-defined] + _input_adapter: Final[io_adapter.InputAdapter] + _output_adapter: Final[io_adapter.OutputAdapter] + _diagnostic_context: Final[diagnostics.DiagnosticContext] + _fake_context: Final[Optional[ONNXFakeContext]] + _export_exception: Final[Optional[Exception]] + _model_signature: Final[Optional[torch.export.ExportGraphSignature]] + _model_torch: Final[ + Optional[Union[torch.nn.Module, Callable, torch_export.ExportedProgram]] + ] + + @_beartype.beartype + def __init__( + self, + model_proto: onnx.ModelProto, # type: ignore[name-defined] + input_adapter: io_adapter.InputAdapter, + output_adapter: io_adapter.OutputAdapter, + diagnostic_context: diagnostics.DiagnosticContext, + *, + fake_context: Optional[ONNXFakeContext] = None, + export_exception: Optional[Exception] = None, + model_signature: Optional[torch.export.ExportGraphSignature] = None, + model_torch: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ): + self._model_proto = model_proto + self._model_signature = model_signature + self._model_torch = model_torch + self._input_adapter = input_adapter + self._output_adapter = output_adapter + self._diagnostic_context = diagnostic_context + self._fake_context = fake_context + self._export_exception = export_exception + + def __call__( + self, + *args: Any, + model_with_state_dict: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + options: Optional[ONNXRuntimeOptions] = None, + **kwargs: Any, + ) -> Any: + """Runs the ONNX model using ONNX Runtime. + + Args: + args: The positional inputs to the model. + kwargs: The keyword inputs to the model. + model_with_state_dict: The PyTorch model to fetch state from. + Required when :func:`enable_fake_mode` is used to extract real initializers as needed by the ONNX graph. + options: The options to use for running the model with ONNX Runtime. + + Returns: + The model output as computed by ONNX Runtime. + """ + import onnxruntime # type: ignore[import] + + # The model specified by the user takes precedence, when specified + model_with_state_dict = model_with_state_dict or self._model_torch + + onnx_input = self.adapt_torch_inputs_to_onnx( + *args, model_with_state_dict=model_with_state_dict, **kwargs + ) + options = options or ONNXRuntimeOptions() + providers = options.execution_providers or onnxruntime.get_available_providers() + onnx_model = self.model_proto.SerializeToString() + ort_session = onnxruntime.InferenceSession(onnx_model, providers=providers) + + onnxruntime_input = { + k.name: v.numpy(force=True) + for k, v in zip(ort_session.get_inputs(), onnx_input) + } + + return ort_session.run(None, onnxruntime_input) + + @property + def model_proto(self) -> onnx.ModelProto: # type: ignore[name-defined] + """The exported ONNX model as an :py:obj:`onnx.ModelProto`.""" + + if self._export_exception is not None: + raise self._export_exception + return self._model_proto + + @property + def model_signature(self) -> Optional[torch.export.ExportGraphSignature]: + """The model signature for the exported ONNX graph. + + This information is relevant because the ONNX specification often differs from PyTorch's, resulting + in an ONNX graph with input and output schema different from the actual PyTorch model implementation.
+ By using the model signature, users can understand the input and output differences + and properly execute the model in ONNX Runtime. + + NOTE: Model signature is only available when the ONNX graph was exported from a + :class:`torch.export.ExportedProgram` object. + + NOTE: Any transformation done to the model that changes the model signature must be accompanied + by updates to this model signature as well through :class:`InputAdaptStep` and/or :class:`OutputAdaptStep`. + + Example: + + The following model produces different sets of inputs and outputs. + The first 4 inputs are model parameters (namely conv1.weight, conv2.weight, fc1.weight, fc2.weight), + the next 2 inputs are registered buffers (namely my_buffer2, my_buffer1), and finally + the last 2 inputs are user inputs (namely x and b). + The first output is a buffer mutation (namely my_buffer2) and the last output is the actual model output. + + >>> class CustomModule(torch.nn.Module): + ... def __init__(self): + ... super().__init__() + ... self.my_parameter = torch.nn.Parameter(torch.tensor(2.0)) + ... self.register_buffer("my_buffer1", torch.tensor(3.0)) + ... self.register_buffer("my_buffer2", torch.tensor(4.0)) + ... self.conv1 = torch.nn.Conv2d(1, 32, 3, 1, bias=False) + ... self.conv2 = torch.nn.Conv2d(32, 64, 3, 1, bias=False) + ... self.fc1 = torch.nn.Linear(9216, 128, bias=False) + ... self.fc2 = torch.nn.Linear(128, 10, bias=False) + ... def forward(self, x, b): + ... tensor_x = self.conv1(x) + ... tensor_x = torch.nn.functional.sigmoid(tensor_x) + ... tensor_x = self.conv2(tensor_x) + ... tensor_x = torch.nn.functional.sigmoid(tensor_x) + ... tensor_x = torch.nn.functional.max_pool2d(tensor_x, 2) + ... tensor_x = torch.flatten(tensor_x, 1) + ... tensor_x = self.fc1(tensor_x) + ... tensor_x = torch.nn.functional.sigmoid(tensor_x) + ... tensor_x = self.fc2(tensor_x) + ... output = torch.nn.functional.log_softmax(tensor_x, dim=1) + ... ( + ... self.my_buffer2.add_(1.0) + self.my_buffer1 + ... ) # Mutate buffer through in-place addition + ...
return output + >>> inputs = (torch.rand((64, 1, 28, 28), dtype=torch.float32), torch.randn(3)) + >>> exported_program = torch.export.export(CustomModule(), args=inputs) + >>> onnx_program = torch.onnx.dynamo_export(exported_program, *inputs) + >>> print(onnx_program.model_signature) + ExportGraphSignature( + input_specs=[ + InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='arg0_1'), + target='conv1.weight', persistent=None), + InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='arg1_1'), + target='conv2.weight', persistent=None), + InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='arg2_1'), + target='fc1.weight', persistent=None), + InputSpec(kind=<InputKind.PARAMETER: 2>, arg=TensorArgument(name='arg3_1'), + target='fc2.weight', persistent=None), + InputSpec(kind=<InputKind.BUFFER: 3>, arg=TensorArgument(name='arg4_1'), + target='my_buffer2', persistent=True), + InputSpec(kind=<InputKind.BUFFER: 3>, arg=TensorArgument(name='arg5_1'), + target='my_buffer1', persistent=True), + InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='arg6_1'), + target=None, persistent=None), + InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='arg7_1'), + target=None, persistent=None) + ], + output_specs=[ + OutputSpec(kind=<OutputKind.BUFFER_MUTATION: 3>, arg=TensorArgument(name='add'), target='my_buffer2'), + OutputSpec(kind=<OutputKind.USER_OUTPUT: 1>, arg=TensorArgument(name='_log_softmax'), target=None) + ] + ) + """ + + return self._model_signature + + @property + def diagnostic_context(self) -> diagnostics.DiagnosticContext: + """The diagnostic context associated with the export.""" + + return self._diagnostic_context + + @property + def fake_context(self) -> Optional[ONNXFakeContext]: + """The fake context associated with the export.""" + + return self._fake_context + + @_beartype.beartype + def adapt_torch_inputs_to_onnx( + self, + *model_args, + model_with_state_dict: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + **model_kwargs, + ) -> Sequence[Union[torch.Tensor, int, float, bool]]: + """Converts the PyTorch model inputs to exported ONNX model inputs format. + + Due to design differences, the input/output formats of a PyTorch model and its exported + ONNX model are often not the same. E.g., None is allowed for PyTorch models, but is + not supported by ONNX. Nested constructs of tensors are allowed for PyTorch models, + but only flattened tensors are supported by ONNX, etc. + + The actual adapting steps are associated with each individual export. They + depend on the PyTorch model, the particular set of model_args and model_kwargs + used for the export, and the export options. + + This method replays the adapting steps recorded during export. + + Args: + model_args: The PyTorch model inputs. + model_with_state_dict: The PyTorch model to get extra state from. + If not specified, the model used during export is used. + Required when :func:`enable_fake_mode` is used to extract real initializers as needed by the ONNX graph. + model_kwargs: The PyTorch model keyword inputs. + + Returns: + A sequence of tensors converted from PyTorch model inputs. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> import torch.onnx + >>> from typing import Dict, Tuple + >>> def func_nested_input( + ... x_dict: Dict[str, torch.Tensor], + ... y_tuple: Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]] + ... ): + ... if "a" in x_dict: + ... x = x_dict["a"] + ... elif "b" in x_dict: + ... x = x_dict["b"] + ... else: + ... x = torch.randn(3) + ... + ... y1, (y2, y3) = y_tuple + ... + ...
return x + y1 + y2 + y3 + >>> x_dict = {"a": torch.tensor(1.)} + >>> y_tuple = (torch.tensor(2.), (torch.tensor(3.), torch.tensor(4.))) + >>> onnx_program = torch.onnx.dynamo_export(func_nested_input, x_dict, y_tuple) + >>> print(x_dict, y_tuple) + {'a': tensor(1.)} (tensor(2.), (tensor(3.), tensor(4.))) + >>> print(onnx_program.adapt_torch_inputs_to_onnx(x_dict, y_tuple, model_with_state_dict=func_nested_input)) + (tensor(1.), tensor(2.), tensor(3.), tensor(4.)) + + .. warning:: + This API is experimental and is *NOT* backward-compatible. + + """ + # The model specified by the user takes precedence, when specified + model_with_state_dict = model_with_state_dict or self._model_torch + assert ( + model_with_state_dict is not None + ), "model_with_state_dict must be specified." + return self._input_adapter.apply( + *model_args, model=model_with_state_dict, **model_kwargs + ) + + @_beartype.beartype + def adapt_torch_outputs_to_onnx( + self, + model_outputs: Any, + model_with_state_dict: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Sequence[Union[torch.Tensor, int, float, bool]]: + """Converts the PyTorch model outputs to exported ONNX model outputs format. + + Due to design differences, the input/output formats of a PyTorch model and its exported + ONNX model are often not the same. E.g., None is allowed for PyTorch models, but is + not supported by ONNX. Nested constructs of tensors are allowed for PyTorch models, + but only flattened tensors are supported by ONNX, etc. + + The actual adapting steps are associated with each individual export. They + depend on the PyTorch model, the particular set of model_args and model_kwargs + used for the export, and the export options. + + This method replays the adapting steps recorded during export. + + Args: + model_outputs: The PyTorch model outputs. + model_with_state_dict: The PyTorch model to get extra state from. + If not specified, the model used during export is used. + Required when :func:`enable_fake_mode` is used to extract real initializers as needed by the ONNX graph. + + Returns: + PyTorch model outputs in exported ONNX model outputs format. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> import torch.onnx + >>> def func_returning_tuples(x, y, z): + ... x = x + y + ... y = y + z + ... z = x + y + ... return (x, (y, z)) + >>> x = torch.tensor(1.) + >>> y = torch.tensor(2.) + >>> z = torch.tensor(3.) + >>> onnx_program = torch.onnx.dynamo_export(func_returning_tuples, x, y, z) + >>> pt_output = func_returning_tuples(x, y, z) + >>> print(pt_output) + (tensor(3.), (tensor(5.), tensor(8.))) + >>> print(onnx_program.adapt_torch_outputs_to_onnx(pt_output, model_with_state_dict=func_returning_tuples)) + [tensor(3.), tensor(5.), tensor(8.)] + + .. warning:: + This API is experimental and is *NOT* backward-compatible. + + """ + # The model specified by the user takes precedence, when specified + model_with_state_dict = model_with_state_dict or self._model_torch + assert ( + model_with_state_dict is not None + ), "model_with_state_dict must be specified." + return self._output_adapter.apply(model_outputs, model=model_with_state_dict) + + @_beartype.beartype + def save( + self, + destination: Union[str, io.BufferedIOBase], + *, + model_state: Optional[Union[Dict[str, Any], str]] = None, + serializer: Optional[ONNXProgramSerializer] = None, + ) -> None: + """Saves the in-memory ONNX model to ``destination`` using the specified ``serializer``.
+ + Args: + destination: The destination to save the ONNX model. It can be either a string or a file-like object. + When used with ``model_state``, it must be a string with a full path to the destination. + If `destination` is a string, besides saving the ONNX model into a file, model weights are also stored + in separate files in the same directory as the ONNX model. E.g. for `destination="/path/model.onnx"`, + the initializers are saved in "/path/" folder along with "onnx.model". + model_state: The state_dict of the PyTorch model containing all weights on it. + It can be either a string with the path to a checkpoint or a dictionary with the actual model state. + The supported file formats are the same as those supported by `torch.load` and `safetensors.safe_open`. + Required when :func:`enable_fake_mode` is used but real initializers are needed on the ONNX graph. + serializer: The serializer to use. If not specified, the model will be serialized as Protobuf. + """ + if serializer is None: + if isinstance(destination, str): + serializer = LargeProtobufONNXProgramSerializer(destination) + else: + serializer = ProtobufONNXProgramSerializer() + + # Add initializers when symbolic tracing is enabled + _model_state_files: List[Union[str, io.BytesIO, Dict[str, Any]]] = [] + if model_state is not None: + assert isinstance( + model_state, (dict, str) + ), "model_state must be a path to the model's state_dict or the actual state_dict" + # NOTE: For dict, there can be performance penalty or high memory usage that might lead to OOM + # if the dict wasn't loaded with torch.load(..., mmap=True, map_location="cpu") + _model_state_files.append(model_state) + elif self._fake_context and self._fake_context.state_dict_paths: + # Load state from previous model.load_state_dict() call within enable_fake_mode() context + for path in self._fake_context.state_dict_paths: + if path in _model_state_files: + # ignore duplicate + continue + if os.path.exists(path): # type: ignore[arg-type] + _model_state_files.append(path) + + if _model_state_files: + if not isinstance(destination, str): + raise RuntimeError( + "`destination` must be a string with a path when `model_state` is specified." + ) + destination_path, destination_filename = os.path.split(destination) + destination_path = destination_path or os.getcwd() + onnx_model_location = destination_filename + + # TODO: Should this be part of the serializer? + fx_serialization.save_model_with_external_data( + destination_path, + onnx_model_location, + "", # When initializers >2GB, must be in the same folder as the model + tuple(_model_state_files), + self.model_proto, + ) + else: + if isinstance(destination, str): + with open(destination, "wb") as f: + serializer.serialize(self, f) + else: + try: + serializer.serialize(self, destination) + except ValueError as exc: + raise ValueError( + "'destination' should be provided as a path-like string when saving a model larger than 2GB. " + "External tensor data will be saved alongside the model on disk." + ) from exc + + @_beartype.beartype + def save_diagnostics(self, destination: str) -> None: + """Saves the export diagnostics as a SARIF log to the specified destination path. + + Args: + destination: The destination to save the diagnostics SARIF log. + It must have a `.sarif` extension. + + Raises: + ValueError: If the destination path does not end with `.sarif` extension. 
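+
+        Example (an editorial sketch, assuming ``onnx_program`` is an
+        :class:`ONNXProgram` returned by :func:`torch.onnx.dynamo_export`)::
+
+            onnx_program.save_diagnostics("export_report.sarif")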
+ """ + if not destination.endswith(".sarif"): + message = f"'destination' must have a .sarif extension, got {destination}" + log.fatal(message) + raise ValueError(message) + + self.diagnostic_context.dump(destination) + + @classmethod + def _from_failure( + cls, + export_exception: Exception, + diagnostic_context: diagnostics.DiagnosticContext, + ) -> Self: + """ + Creates an instance of :class:`ONNXProgram` when the export process encounters a failure. + + In case of a failed export, this method is used to encapsulate the exception + and associated diagnostic context within an :class:`ONNXProgram` instance for + easier handling and debugging. + + Args: + export_exception: The exception raised during the export process. + diagnostic_context: The context associated with diagnostics during export. + + Returns: + An instance of :class:`ONNXProgram` representing the failed ONNX program. + """ + # Defer `import onnx` out of `import torch` path + # https://github.com/pytorch/pytorch/issues/103764 + import onnx + + # TODO: Should we populate ONNXProgram with more info, such _model_torch for easier debug? + return ONNXProgram( + onnx.ModelProto(), # type: ignore[attr-defined] + io_adapter.InputAdapter(), + io_adapter.OutputAdapter(), + diagnostic_context, + export_exception=export_exception, + ) + + +class FXGraphExtractor(abc.ABC): + """Abstract interface for FX graph extractor engines. + This class isolates FX extraction logic from the rest of the export logic. + That allows a single ONNX exporter that can leverage different FX graphs.""" + + def __init__(self) -> None: + super().__init__() + self.input_adapter: io_adapter.InputAdapter = io_adapter.InputAdapter() + self.output_adapter: io_adapter.OutputAdapter = io_adapter.OutputAdapter() + + @abc.abstractmethod + def generate_fx( + self, + options: ResolvedExportOptions, + model: Union[torch.nn.Module, Callable], + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + ) -> torch.fx.GraphModule: + """Analyzes user ``model`` and generates a FX graph. + Args: + options: The export options. + model: The user model. + model_args: The model's positional input arguments. + model_kwargs: The model's keyword input arguments. + Returns: + The generated FX Graph. + """ + ... + + # TODO: Design the passes API + @abc.abstractmethod + def pre_export_passes( + self, + options: ResolvedExportOptions, + original_model: Union[torch.nn.Module, Callable], + fx_module: torch.fx.GraphModule, + fx_module_args: Sequence[Any], + ): + """Applies pre-export passes to the FX graph. + + Pre-export passes are FX-to-FX graph transformations that make the graph + more palatable for the FX-to-ONNX conversion. + For example, it can be used to flatten model input/output, add explicit + casts to the graph, replace/decompose operators, functionalize the graph, etc. + """ + ... 
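+
+# An editorial sketch (not part of the module) of what a concrete
+# `FXGraphExtractor` subclass could look like; the use of
+# `torch.fx.symbolic_trace` here is an illustrative assumption, not one of the
+# extraction engines that ships with this exporter:
+#
+#     class SymbolicTraceExtractor(FXGraphExtractor):
+#         def generate_fx(self, options, model, model_args, model_kwargs):
+#             graph_module = torch.fx.symbolic_trace(model)
+#             # Reuse the shared pass pipeline defined at the bottom of this module.
+#             return self.pre_export_passes(options, model, graph_module, model_args)
+#
+#         def pre_export_passes(self, options, original_model, fx_module, fx_module_args):
+#             return common_pre_export_passes(options, original_model, fx_module, fx_module_args)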
+ + +class Exporter: + @_beartype.beartype + def __init__( + self, + options: ResolvedExportOptions, + model: Union[torch.nn.Module, Callable, torch_export.ExportedProgram], + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + ): + self.options = options + assert self.options is not None + + self.model = model + self.model_args = model_args + self.model_kwargs = model_kwargs + + # TODO: https://github.com/pytorch/pytorch/issues/107714 + # NOTE: FXSymbolicTracer would fail in this assert, as it does not use `enable_fake_mode` + from torch.onnx._internal.fx import fx_symbolic_graph_extractor + + if not isinstance( + self.options.fx_tracer, fx_symbolic_graph_extractor.FXSymbolicTracer + ): + self._assert_fake_tensor_mode() + + def export(self) -> ONNXProgram: + # TODO: Defer `import onnxscript` out of `import torch` path + # https://github.com/pytorch/pytorch/issues/103764 + from torch.onnx._internal.fx import decomposition_skip + + with self.options.diagnostic_context, decomposition_skip.enable_decomposition_skips( + self.options + ): + graph_module = self.options.fx_tracer.generate_fx( + self.options, self.model, self.model_args, self.model_kwargs + ) + # TODO: Defer `import onnxscript` out of `import torch` path + # https://github.com/pytorch/pytorch/issues/103764 + from torch.onnx._internal.fx import fx_onnx_interpreter + + fx_interpreter = fx_onnx_interpreter.FxOnnxInterpreter( + diagnostic_context=self.options.diagnostic_context + ) + onnxscript_graph = fx_interpreter.run( + fx_graph_module=graph_module, + onnxfunction_dispatcher=self.options.onnxfunction_dispatcher, + op_level_debug=self.options.op_level_debug, + ) + + # NOTE: Filter out the initializers with fake tensors when it's fake_mode exporting. + # Otherwise, the ONNX exporter will fail: RuntimeError: basic_string::_M_construct null + # not valid. + # Concrete data is expected to be filled for those initializers later during `ONNXProgram.save`. + if self.options.fake_context is not None: + initializers_with_real_tensors: Dict[str, torch.Tensor] = {} + for ( + initializer_name, + initializer, + ) in onnxscript_graph.initializers.items(): + if not isinstance(initializer, torch._subclasses.FakeTensor): + initializers_with_real_tensors[initializer_name] = initializer + onnxscript_graph.initializers = initializers_with_real_tensors + + # Export TorchScript graph to ONNX ModelProto. 
+ onnx_model = onnxscript_graph.to_model_proto( + self.options.onnx_registry.opset_version, + ) + + return torch.onnx.ONNXProgram( + onnx_model, + self.options.fx_tracer.input_adapter, + self.options.fx_tracer.output_adapter, + self.options.diagnostic_context, + fake_context=self.options.fake_context, + model_signature=getattr( + self.model, "graph_signature", None + ), # Available for isinstance(self.model, ExportedProgram) only + model_torch=self.model, + ) + + def _assert_fake_tensor_mode(self): + """Asserts that the model and its input do not contain fake tensors.""" + + # Case 1: Model with fake inputs/weights and without enabling fake mode + has_any_fake_tensor = pytree.tree_any( + lambda x: isinstance(x, torch._subclasses.FakeTensor), + (self.model_args, self.model_kwargs), + ) + has_any_fake_param_or_buffer = False + if isinstance(self.model, torch.nn.Module): + has_any_fake_param_or_buffer = pytree.tree_any( + lambda x: isinstance(x, torch._subclasses.FakeTensor), + (self.model.parameters(), self.model.buffers()), + ) + if ( + has_any_fake_tensor or has_any_fake_param_or_buffer + ) and not self.options.fake_context: + raise RuntimeError( + "Cannot export a model with fake inputs/weights without enabling fake mode.", + ) + # Case 2: Model with non fake inputs/weights and enabled fake mode + has_any_non_fake_tensors = pytree.tree_any( + lambda x: isinstance(x, torch.Tensor) + and not isinstance(x, torch._subclasses.FakeTensor), + (self.model_args, self.model_kwargs), + ) + has_any_non_fake_param_or_buffer = False + if isinstance(self.model, torch.nn.Module): + has_any_non_fake_param_or_buffer = pytree.tree_any( + lambda x: isinstance(x, torch.Tensor) + and not isinstance(x, torch._subclasses.FakeTensor), + (self.model.parameters(), self.model.buffers()), + ) + if ( + has_any_non_fake_tensors or has_any_non_fake_param_or_buffer + ) and self.options.fake_context: + raise RuntimeError( + "Cannot export a model with non fake inputs/weights and enabled fake mode.", + ) + + +class UnsatisfiedDependencyError(RuntimeError): + """Raised when an ONNX exporter dependency cannot be satisfied.""" + + def __init__(self, package_name: str, message: str): + super().__init__(message) + self.package_name = package_name + + +class OnnxExporterError(RuntimeError): + """Raised when an ONNX exporter error occurs. + + This exception is thrown when there's an error during the ONNX export process. + It encapsulates the :class:`ONNXProgram` object generated until the failure, allowing + access to the partial export results and associated metadata. + """ + + onnx_program: Final[ONNXProgram] + + def __init__(self, onnx_program: ONNXProgram, message: str): + """ + Initializes the OnnxExporterError with the given ONNX program and message. + + Args: + onnx_program (ONNXProgram): The partial results of the ONNX export. + message (str): The error message to be displayed. + """ + super().__init__(message) + self.onnx_program = onnx_program + + +class InvalidExportOptionsError(RuntimeError): + """Raised when user specified an invalid value for the :class:`ExportOptions`.""" + + pass + + +@_beartype.beartype +def _assert_dependencies(export_options: ResolvedExportOptions): + opset_version = export_options.onnx_registry.opset_version + + def missing_package(package_name: str, exc_info: logging._ExcInfoType): + message = ( + f"Please install the `{package_name}` package " + f"(e.g. `python -m pip install {package_name}`)." 
+ ) + log.fatal(message, exc_info=exc_info) + return UnsatisfiedDependencyError(package_name, message) + + def missing_opset(package_name: str): + message = ( + f"The installed `{package_name}` does not support the specified ONNX opset " + f"version {opset_version}. Install a newer `{package_name}` package or " + f"specify an older opset version." + ) + log.fatal(message) + return UnsatisfiedDependencyError(package_name, message) + + try: + import onnx + except ImportError as e: + raise missing_package("onnx", e) from e + + if onnx.defs.onnx_opset_version() < opset_version: + raise missing_opset("onnx") + + try: + # PyTorch runs lintrunner in CI without onnxscript installed + import onnxscript # type: ignore[import] + except ImportError as e: + raise missing_package("onnxscript", e) from e + + if not isinstance( + onnxscript.onnx_opset.all_opsets[("", opset_version)], + onnxscript.values.Opset, + ): + raise missing_opset("onnxscript") + + +@_beartype.beartype +def dynamo_export( + model: Union[torch.nn.Module, Callable, torch_export.ExportedProgram], # type: ignore[name-defined] + /, + *model_args, + export_options: Optional[ExportOptions] = None, + **model_kwargs, +) -> ONNXProgram: + """Export a torch.nn.Module to an ONNX graph. + + Args: + model: The PyTorch model to be exported to ONNX. + model_args: Positional inputs to ``model``. + model_kwargs: Keyword inputs to ``model``. + export_options: Options to influence the export to ONNX. + + Returns: + An in-memory representation of the exported ONNX model. + + **Example 1 - Simplest export** + :: + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.linear = torch.nn.Linear(2, 2) + def forward(self, x, bias=None): + out = self.linear(x) + out = out + bias + return out + model = MyModel() + kwargs = {"bias": 3.} + args = (torch.randn(2, 2, 2),) + onnx_program = torch.onnx.dynamo_export( + model, + *args, + **kwargs).save("my_simple_model.onnx") + + **Example 2 - Exporting with dynamic shapes** + :: + + # The previous model can be exported with dynamic shapes + export_options = torch.onnx.ExportOptions(dynamic_shapes=True) + onnx_program = torch.onnx.dynamo_export( + model, + *args, + **kwargs, + export_options=export_options) + onnx_program.save("my_dynamic_model.onnx") + + + By printing input dynamic dimensions we can see the input shape is no longer (2,2,2) + :: + + >>> print(onnx_program.model_proto.graph.input[0]) + name: "arg0" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_param: "arg0_dim_0" + } + dim { + dim_param: "arg0_dim_1" + } + dim { + dim_param: "arg0_dim_2" + } + } + } + } + """ + + if export_options is not None: + resolved_export_options = ( + export_options + if isinstance(export_options, ResolvedExportOptions) + else ResolvedExportOptions(export_options, model=model) + ) + else: + resolved_export_options = ResolvedExportOptions(ExportOptions(), model=model) + + _assert_dependencies(resolved_export_options) + + try: + return Exporter( + options=resolved_export_options, + model=model, + model_args=model_args, + model_kwargs=model_kwargs, + ).export() + except Exception as e: + sarif_report_path = _DEFAULT_FAILED_EXPORT_SARIF_LOG_PATH + resolved_export_options.diagnostic_context.dump(sarif_report_path) + message = ( + f"Failed to export the model to ONNX. Generating SARIF report at '{sarif_report_path}'. " + "SARIF is a standard format for the output of static analysis tools. 
" + "SARIF logs can be loaded in VS Code SARIF viewer extension, " + "or SARIF web viewer (https://microsoft.github.io/sarif-web-component/). " + f"Please report a bug on PyTorch Github: {_PYTORCH_GITHUB_ISSUES_URL}" + ) + raise OnnxExporterError( + ONNXProgram._from_failure(e, resolved_export_options.diagnostic_context), + message, + ) from e + + +def common_pre_export_passes( + options: ResolvedExportOptions, + original_model: Union[torch.nn.Module, Callable], + fx_module: torch.fx.GraphModule, + fx_module_args: Sequence[Any], +): + # TODO: Import here to prevent circular dependency + from torch.onnx._internal.fx import analysis, passes + + diagnostic_context = options.diagnostic_context + + # Apply decomposition table to the input graph. + module = passes.Decompose( + diagnostic_context, + fx_module, + options.decomposition_table, + enable_dynamic_axes=options.dynamic_shapes, + allow_fake_constant=options.fake_context is not None, + ).run(*fx_module_args) + + # ONNX does not support views and mutations. + # Functionalize to get a semantically equivalent graph without mutations. + module = passes.Functionalize( + diagnostic_context, + module, + enable_dynamic_axes=options.dynamic_shapes, + allow_fake_constant=options.fake_context is not None, + ).run(*fx_module_args) + + # Input mutations are detected and distilled after `Functionalize` pass. + # Remove them since ONNX inference does not need them. + module = passes.RemoveInputMutation(diagnostic_context, module).run(*fx_module_args) + + # ONNX does not support concept of (implicit) type promotion. + # Insert type casts explicitly where needed. + module = passes.InsertTypePromotion(diagnostic_context, module).run() + + analysis.UnsupportedFxNodesAnalysis( + diagnostic_context, module, options.onnxfunction_dispatcher + ).analyze(infra.levels.ERROR) + + if isinstance(original_model, torch.nn.Module): + module = passes.RestoreParameterAndBufferNames( + diagnostic_context, module, original_model + ).run() + + # This operation should be invoked as the last pre export pass. + # See [NOTE: Modularize pass ordering] + module = passes.Modularize(diagnostic_context, module).run() + + # ONNX does not support None inputs. During graph building, all None inputs + # are removed. Here we register this step to input adapter. + options.fx_tracer.input_adapter.append_step(io_adapter.RemoveNoneInputStep()) + + # NOTE: temp workaround for https://github.com/pytorch/pytorch/issues/99534 + # Dynamo doesn't support non-tensor inputs. + options.fx_tracer.input_adapter.append_step(io_adapter.RemoveNonTensorInputStep()) + + # ONNX does not support complex inputs. During graph building, all complex inputs + # are converted to real representation inputs. Here we register this step to + # input/output adapter. + options.fx_tracer.input_adapter.append_step( + io_adapter.ConvertComplexToRealRepresentationInputStep() + ) + + # ONNX can't represent collection types (e.g., dictionary, tuple of tuple of + # tensor, etc), we flatten the collection and register each element as output. + options.fx_tracer.output_adapter.append_step(io_adapter.FlattenOutputStep()) + + # Output post-processing steps should happen after `FlattenOutputStep`. 
+ options.fx_tracer.output_adapter.append_step( + io_adapter.ConvertComplexToRealRepresentationOutputStep() + ) + + return module + + +__all__ = [ + "DiagnosticOptions", + "ExportOptions", + "ONNXProgram", + "ONNXProgramSerializer", + "ONNXRuntimeOptions", + "InvalidExportOptionsError", + "OnnxExporterError", + "OnnxRegistry", + "UnsatisfiedDependencyError", + "dynamo_export", + "enable_fake_mode", +] diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/io_adapter.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/io_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..830be757422be715be5342fcd629de873d9e6aea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/io_adapter.py @@ -0,0 +1,679 @@ +from __future__ import annotations + +import inspect + +from typing import ( + Any, + Callable, + List, + Mapping, + Optional, + Protocol, + runtime_checkable, + Sequence, + Tuple, + Union, +) + +import torch +import torch.export as torch_export + +from torch.onnx._internal import _beartype +from torch.utils import _pytree as pytree + +# TODO(bowbao): Add diagnostics for IO adapters. + + +@runtime_checkable +class InputAdaptStep(Protocol): + """A protocol that defines a step in the input adapting process. + + The input adapting process is a sequence of steps that are applied to the + PyTorch model inputs to transform them into the inputs format expected by the + exported ONNX model. Each step takes the PyTorch model inputs as arguments and + returns the transformed inputs. + + This serves as a base formalized construct for the transformation done to model + input signature by any individual component in the exporter. + """ + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + ... + + +class InputAdapter: + """A class that adapts the PyTorch model inputs to exported ONNX model inputs format.""" + + def __init__(self, steps: Optional[List[InputAdaptStep]] = None): + self._steps = steps or [] + + @_beartype.beartype + def append_step(self, step: InputAdaptStep) -> None: + """Appends a step to the input adapt steps. + + Args: + step: The step to append. + """ + self._steps.append(step) + + @_beartype.beartype + def apply( + self, + *model_args, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + **model_kwargs, + ) -> Sequence[Union[int, float, bool, str, "torch.Tensor", None]]: + """Converts the PyTorch model inputs to exported ONNX model inputs format. + + Args: + model_args: The PyTorch model inputs. + model: The PyTorch model. + model_kwargs: The PyTorch model keyword inputs. + Returns: + A sequence of tensors converted from PyTorch model inputs. + """ + args: Sequence[Any] = model_args + kwargs: Mapping[str, Any] = model_kwargs + for step in self._steps: + args, kwargs = step.apply(args, kwargs, model=model) + assert not kwargs + return args + + +@runtime_checkable +class OutputAdaptStep(Protocol): + """A protocol that defines a step in the output adapting process. + + The output adapting process is a sequence of steps that are applied to the + PyTorch model outputs to transform them into the outputs format produced by the + exported ONNX model. Each step takes the PyTorch model outputs as arguments and + returns the transformed outputs. 
+ + This serves as a base formalized construct for the transformation done to model + output signature by any individual component in the exporter. + """ + + def apply( + self, + model_outputs: Any, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Any: + ... + + +class OutputAdapter: + """A class that adapts the PyTorch model outputs to exported ONNX model outputs format.""" + + def __init__(self, steps: Optional[List[OutputAdaptStep]] = None): + self._steps = steps or [] + + @_beartype.beartype + def append_step(self, step: OutputAdaptStep) -> None: + """Appends a step to the output format steps. + + Args: + step: The step to append. + """ + self._steps.append(step) + + @_beartype.beartype + def apply( + self, + model_outputs: Any, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Sequence[Union["torch.Tensor", int, float, bool, str]]: + """Converts the PyTorch model outputs to exported ONNX model outputs format. + + Args: + model_outputs: The PyTorch model outputs. + model: The PyTorch model. + + Returns: + PyTorch model outputs in exported ONNX model outputs format. + """ + for step in self._steps: + model_outputs = step.apply(model_outputs, model=model) + return model_outputs + + +# TODO: make_fx lose stack info https://github.com/pytorch/pytorch/issues/90276 + + +def _replace_tuple_with_list(spec: pytree.TreeSpec) -> pytree.TreeSpec: + _type = list if spec.type == tuple else spec.type + return pytree.TreeSpec( + _type, spec.context, list(map(_replace_tuple_with_list, spec.children_specs)) + ) + + +def _open_top_level_list_if_single_element(spec: pytree.TreeSpec) -> pytree.TreeSpec: + if spec.type == list and spec.num_children == 1: + return spec.children_specs[0] + return spec + + +def _assert_identical_pytree_spec( + spec1: pytree.TreeSpec, spec2: pytree.TreeSpec, error_message: str +) -> None: + """Assert the two `TreeSpec` objects are identical. + + Args: + spec1: The first `TreeSpec` object. + spec2: The second `TreeSpec` object. + error_message: The error message to raise if the two `TreeSpec` objects are not + identical. + + Raises: + ValueError: If the two `TreeSpec` objects are not identical. + """ + # TODO(bowbao): Turn this check into diagnostic. Consider warning instead of error. + pass_if_any_checks: Sequence[Callable[[], bool]] = [ + lambda: spec1 == spec2, + # FIXME: Bug in `dynamo.export`. Sometimes outputs returned in 'list' instead of 'tuple'. + lambda: _replace_tuple_with_list(spec1) == _replace_tuple_with_list(spec2), + # FIXME: Bug in `dynamo.export`. Sometimes single function return is wrapped in list. + lambda: _open_top_level_list_if_single_element(spec1) == spec2, + lambda: spec1 == _open_top_level_list_if_single_element(spec2), + ] + + if not any(check() for check in pass_if_any_checks): + raise ValueError(f"{error_message}\nExpect {spec1}.\nActual {spec2}.") + + +class BindInputStep(InputAdaptStep): + """Bind the input arguments to the model signature.""" + + def __init__(self, model_signature: inspect.Signature): + self._model_signature = model_signature + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Bind the input arguments to the model signature. + + We hope the input kwargs will be mapped to bound.args after binding. + If not, we will raise an error. 
+ + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. args is always empty. + + Raises: + ValueError: If there are keyword-only arguments left after binding args and + kwargs to model signature. + """ + bound = self._model_signature.bind(*model_args, **model_kwargs) + bound.apply_defaults() + + # keyword-only arguments are not handled. + # bound.kwargs only contains keyword-only arguments after calling + # bind & apply_defaults, so we raise if it's not empty. + if bound.kwargs: + raise ValueError("Keyword-only arguments are not supported.") + return (), bound.arguments + + +class MergeKwargsIntoArgsInputStep(InputAdaptStep): + """Merge the input kwargs into the input args.""" + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Merge the input kwargs into the input args. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. kwargs is always empty. + """ + return tuple(model_args) + tuple(model_kwargs.values()), {} + + +class LiftParametersAndBuffersIntoArgsInputStep(InputAdaptStep): + """Append parameters and buffers to model's positional argument list.""" + + def __init__(self, inputs: Tuple["torch.Tensor", ...]) -> None: + self.inputs = inputs + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Append model's parameters and buffers into its input. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args + appended inputs and kwargs. + """ + return (*model_args, *self.inputs), model_kwargs + + +class ConvertComplexToRealRepresentationInputStep(InputAdaptStep): + """Convert complex dtype tensors to real representation tensors. + + ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors + to real representation tensors (i.e., float dtype tensors with an extra dimension + representing the real and imaginary parts of the complex number). + + """ + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Convert complex tensors to float tensors. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. + """ + return ( + tuple( + torch.view_as_real(arg.resolve_conj()) + if isinstance(arg, torch.Tensor) and arg.is_complex() + else arg + for arg in model_args + ), + model_kwargs, + ) + + +class RemoveNoneInputStep(InputAdaptStep): + """Remove `None` from arguments. + + This adapt step assumes ``model_kwargs`` is empty. It also assumes ``model_args`` + is flattened, i.e. it does not check `None` inside nested collections. 
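+
+    For example (editorial sketch), already-flattened args ``(x, None, y)``
+    are adapted to ``(x, y)``.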
+    """
+
+    def apply(
+        self,
+        model_args: Sequence[Any],
+        model_kwargs: Mapping[str, Any],
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Tuple[Sequence[Any], Mapping[str, Any]]:
+        """Remove `None` from arguments.
+
+        Args:
+            model_args: The model args.
+            model_kwargs: The model kwargs.
+            model: The PyTorch model.
+
+        Returns:
+            A tuple of the model args and kwargs.
+
+        Raises:
+            ValueError: If `model_kwargs` is not empty.
+        """
+        assert not model_kwargs
+        return tuple(arg for arg in model_args if arg is not None), {}
+
+
+class RemoveNonTensorInputStep(InputAdaptStep):
+    """Remove the non-tensor input arguments.
+
+    Dynamo does not support non-tensor input arguments (https://github.com/pytorch/pytorch/issues/99534).
+
+    Specifically, Dynamo still puts such an input into the graph as an empty node, but
+    nothing consumes it; the concrete value is instead embedded into the graph as a
+    constant arg of a target node. Meta's suggestion in this case is to rewrite the
+    model code so that the input is a tensor if its value is supposed to change at
+    runtime. We might need to further investigate the feasibility of that suggestion.
+
+    For example,
+
+        def func(x, b=1.0):
+            y = x + b
+            z = y.relu()
+            return (y, z)
+
+        x = torch.randn(1, 1, 2, dtype=torch.float32)
+        gm_fun, _ = dynamo.export(func, x, b=8.0, aten_graph=True, tracing_mode="real")
+
+        # class GraphModule(torch.nn.Module):
+        #     def forward(self, x, b):
+        #         arg0: f32[1, 1, 2], arg1, = fx_pytree.tree_flatten_spec(([x, b], {}), self._in_spec)
+        #         # File: path/to/pytorch/test_constant_input.py:5, code: y = x + b
+        #         add_tensor: f32[1, 1, 2] = torch.ops.aten.add.Tensor(arg0, 8.0);  arg0 = None
+
+        #         # File: path/to/pytorch/test_constant_input.py:6, code: z = y.relu()
+        #         relu_default: f32[1, 1, 2] = torch.ops.aten.relu.default(add_tensor)
+        #         return pytree.tree_unflatten([add_tensor, relu_default], self._out_spec)
+
+    The empty torch.fx.Node input is ignored in the ONNX graph, which leads to a
+    mismatch in the number of inputs between ONNX and PyTorch. Thus, we delete the
+    useless input here.
+
+    """
+
+    def apply(
+        self,
+        model_args: Sequence[Any],
+        model_kwargs: Mapping[str, Any],
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Tuple[Sequence[Any], Mapping[str, Any]]:
+        """Remove constant (non-tensor) values from arguments.
+
+        Args:
+            model_args: The model args.
+            model_kwargs: The model kwargs.
+            model: The PyTorch model.
+
+        Returns:
+            A tuple of the model args and kwargs.
+
+        Raises:
+            ValueError: If `model_kwargs` is not empty.
+        """
+        assert not model_kwargs
+        return (
+            tuple(
+                arg
+                for arg in model_args
+                if not isinstance(arg, (int, float, bool, str))
+            ),
+            {},
+        )
+
+
+class FlattenInputWithTreeSpecValidationInputStep(InputAdaptStep):
+    """Flatten nested collection types and return a flat list of elements.
+
+    ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor,
+    etc).
+
+    This class stores the `SpecTree` output produced when `adapt` was called the first
+    time. It then validates the `SpecTree` output produced from later `adapt` calls.
+    """
+
+    _spec: Optional[pytree.TreeSpec] = None
+
+    def apply(
+        self,
+        model_args: Sequence[Any],
+        model_kwargs: Mapping[str, Any],
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Tuple[Sequence[Any], Mapping[str, Any]]:
+        """Flatten the model args and kwargs and validate the `SpecTree` output.
+
+        Args:
+            model_args: The model args.
+            model_kwargs: The model kwargs.
+            model: The PyTorch model.
+
+        Returns:
+            A tuple of the flattened model args and kwargs. The kwargs are empty,
+            because they are flattened and merged into the args.
+
+        Raises:
+            ValueError: If the `SpecTree` output produced from the current `model_args`
+                and `model_kwargs` is not identical to the `SpecTree` output produced
+                from the first inputs that were passed to this method.
+        """
+        flattened_args, spec = pytree.tree_flatten((model_args, model_kwargs))
+        if self._spec is None:
+            self._spec = spec
+        else:
+            _assert_identical_pytree_spec(
+                self._spec,
+                spec,
+                error_message="Model inputs incompatible with the format that was exported. ",
+            )
+        return flattened_args, {}
+
+
+class FlattenOutputStep(OutputAdaptStep):
+    """Flatten nested collection types and return a flat list of elements.
+
+    ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor,
+    etc).
+
+    NOTE: Ideally we would want to use ``FlattenOutputWithTreeSpecValidationOutputStep``, such
+    that the `SpecTree` can be validated for new model outputs. However, this is not
+    currently possible because we never have access to real PyTorch model outputs during
+    export. Only traced outputs may be available, but they are not an accurate reflection
+    of the original PyTorch model outputs format, as they are typically in their own
+    unique format, depending on the tracing strategy.
+    """
+
+    def apply(
+        self,
+        model_outputs: Any,
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Sequence[Any]:
+        """Flatten the model outputs.
+
+        Args:
+            model_outputs: The model outputs to flatten.
+            model: The PyTorch model.
+
+        Returns:
+            A flat list of the model outputs.
+        """
+        return pytree.tree_leaves(model_outputs)
+
+
+class ConvertComplexToRealRepresentationOutputStep(OutputAdaptStep):
+    """Convert complex dtype tensors to real representation tensors.
+
+    ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors
+    to real representation tensors (i.e., float dtype tensors with an extra dimension
+    representing the real and imaginary parts of the complex number).
+
+    """
+
+    def apply(
+        self,
+        model_outputs: Any,
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Any:
+        """Convert complex tensors in the model outputs to real representation tensors.
+
+        Args:
+            model_outputs: The model outputs.
+            model: The PyTorch model.
+
+        Returns:
+            The model outputs, with complex tensors converted to their real representation.
+        """
+        return [
+            torch.view_as_real(output.resolve_conj())
+            if isinstance(output, torch.Tensor) and torch.is_complex(output)
+            else output
+            for output in model_outputs
+        ]
+
+
+class FlattenOutputWithTreeSpecValidationOutputStep(OutputAdaptStep):
+    """Same as ``FlattenOutputStep``, with additional `TreeSpec` validation.
+
+    This class stores the `SpecTree` output produced when `adapt` was called the first
+    time. It then validates the `SpecTree` output produced from later `adapt` calls.
+    """
+
+    _spec: Optional[pytree.TreeSpec] = None
+
+    def apply(
+        self,
+        model_outputs: Any,
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Sequence[Any]:
+        """Flatten the model outputs and validate the `SpecTree` output.
+
+        Args:
+            model_outputs: The model outputs to flatten.
+            model: The PyTorch model.
+
+        Returns:
+            flattened_outputs: The flattened model outputs.
+
+        Raises:
+            ValueError: If the `SpecTree` output produced from the current `model_outputs`
+                is not identical to the `SpecTree` output produced from the first
+                `model_outputs` that was passed to this method.
+        """
+        flattened_outputs, spec = pytree.tree_flatten(model_outputs)
+        if self._spec is None:
+            self._spec = spec
+        else:
+            _assert_identical_pytree_spec(
+                self._spec,
+                spec,
+                error_message="Model outputs incompatible with the format that was exported. ",
+            )
+        return flattened_outputs
+
+
+class PrependParamsBuffersConstantAotAutogradInputStep(InputAdaptStep):
+    """Prepend model parameters, buffers and constants to the user input.
+
+    :func:`torch.export.export` lifts model parameters, buffers and constants as model input, thus, they
+    must be added to the user input before the model is executed.
+
+    Args:
+        model: The PyTorch model with embedded parameters and buffers.
+    """
+
+    def apply(
+        self,
+        model_args: Sequence[Any],
+        model_kwargs: Mapping[str, Any],
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Tuple[Sequence[Any], Mapping[str, Any]]:
+        """Prepend the model's parameters, buffers and constants to the user input.
+
+        Args:
+            model_args: The model args.
+            model_kwargs: The model kwargs.
+            model: The PyTorch model.
+
+        Returns:
+            A tuple of the model args and kwargs.
+        """
+        ordered_params = tuple(
+            model.state_dict[name] for name in model.graph_signature.parameters  # type: ignore[union-attr,index]
+        )
+        non_persistent_buffers = set(model.graph_signature.non_persistent_buffers)  # type: ignore[union-attr]
+        ordered_buffers = []
+        for name in model.graph_signature.buffers:  # type: ignore[union-attr]
+            if name in non_persistent_buffers:
+                ordered_buffers.append(model.constants[name])  # type: ignore[union-attr]
+            else:
+                ordered_buffers.append(model.state_dict[name])  # type: ignore[union-attr,index]
+        ordered_constant_tensors = tuple(
+            model.constants[fqn] for fqn in model.graph_signature.lifted_tensor_constants  # type: ignore[union-attr,index]
+        )
+
+        # NOTE: calling convention is first params, then buffers, then args as user supplied them.
+        # See: torch/_functorch/aot_autograd.py#L1034
+        updated_args = (
+            *ordered_params,
+            *ordered_buffers,
+            *ordered_constant_tensors,
+            *model_args,
+        )
+        if model_kwargs:
+            return MergeKwargsIntoArgsInputStep().apply(
+                updated_args, model_kwargs, model=model
+            )
+        return updated_args, {}
+
+
+class PrependParamsAndBuffersAotAutogradOutputStep(OutputAdaptStep):
+    """Prepend model's mutated buffers to the user output.
+
+    :func:`torch.export.export` lifts model's mutated buffers as outputs, thus, they
+    must be added to the user output after the model is executed.
+
+    Args:
+        model: The PyTorch model with mutated buffers.
+    """
+
+    def apply(
+        self,
+        model_outputs: Any,
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Sequence[Any]:
+        """Prepend the model's mutated buffers to the model outputs.
+
+        Args:
+            model_outputs: The model outputs.
+            model: The PyTorch model.
+
+        Returns:
+            The model outputs, prepended with the model's mutated buffers.
+        """
+
+        assert isinstance(
+            model, torch_export.ExportedProgram
+        ), "'model' must be torch_export.ExportedProgram"
+        ordered_buffers = tuple(
+            model.state_dict[name]
+            if name in model.state_dict
+            else model.constants[name]
+            for name in model.graph_signature.buffers_to_mutate.values()
+        )
+
+        # NOTE: calling convention is first mutated buffers, then output args as the model returned them.
+ updated_outputs = (*ordered_buffers, *model_outputs) + return updated_outputs diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/jit_utils.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/jit_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d192f35bd73b50bfb9cb717fd2dc45690293185a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/jit_utils.py @@ -0,0 +1,399 @@ +"""Utilities for manipulating the torch.Graph object and the torchscript.""" +from __future__ import annotations + +# TODO(justinchuby): Move more of the symbolic helper functions here and expose +# them to the user. + +import dataclasses +import re +import typing +from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union + +import torch +from torch import _C +from torch._C import _onnx as _C_onnx +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, registration + + +_ATTR_PATTERN = re.compile("^(.+)_(([ifstgz])|(ty))$") +_SKIP_NODE_ATTRIBUTES = {"inplace", "aten"} + + +@dataclasses.dataclass +class GraphContext: + """Extra context for symbolic functions with all methods from torch.Graph. + + NOTE: This class is not meant for external consumption. Please do not depend on + it outside of torch.onnx as the interface may evolve. + + Attributes: + graph: The _C.Graph being constructed. + block: The current _C.Block being constructed. + opset: The opset version. + original_node: Current node that is being converted from. + params_dict: Mapping from graph initializer name to IValue. + env: Mapping from Torch domain graph Value to ONNX domain graph Value. + """ + + graph: _C.Graph + block: _C.Block + opset: int + original_node: _C.Node + params_dict: Dict[str, "_C.IValue"] + env: Dict[_C.Value, _C.Value] + + # Relay methods from _C.Graph for compatibility with symbolic functions that expect + # a _C.Graph + def __getattr__(self, name: str) -> Any: + return getattr(self.graph, name) + + @_beartype.beartype + def op( + self, + opname: str, + *raw_args: Union[torch.Tensor, _C.Value], + outputs: int = 1, + **kwargs, + ): + """Creates an ONNX operator "opname", taking "raw_args" as inputs and "kwargs" as attributes. + + The set of operators and the inputs/attributes they take + is documented at https://github.com/onnx/onnx/blob/master/docs/Operators.md + + Args: + opname: The ONNX operator name, e.g., `Abs` or `Add`, or an operator qualified + with a namespace, e.g., `aten::add`. + raw_args: The inputs to the operator; usually provided + as arguments to the `symbolic` definition. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + kwargs: The attributes of the ONNX operator, whose keys are named + according to the following convention: `alpha_f` indicates + the `alpha` attribute with type `f`. The valid type specifiers are + `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute + specified with type float accepts either a single float, or a + list of floats (e.g., you would say `dims_i` for a `dims` attribute + that takes a list of integers). + + Returns: + The value representing the single output of this operator (see the `outputs` + keyword argument for multi-return nodes). 
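+
+        Example (an editorial sketch; assumes ``g`` is the ``GraphContext``
+        passed to a symbolic function and ``x`` is an input ``_C.Value``)::
+
+            # `value_t` sets the tensor attribute `value` on the Constant node.
+            two = g.op("Constant", value_t=torch.tensor(2.0))
+            doubled = g.op("Mul", x, two)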
+ """ + # FIXME(justinchuby): Add the return type back once we know how to handle mypy + return _add_op(self, opname, *raw_args, outputs=outputs, **kwargs) + + @_beartype.beartype + def aten_op(self, operator: str, *args, overload_name: str = "", **kwargs): + """Generates an ONNX ATen op node. + + This function is for backward compatibility with the old symbolic functions. + """ + return self.op( + "aten::ATen", + *args, + operator_s=operator, + overload_name_s=overload_name, + **kwargs, + ) + + # NOTE: For backward compatibility with the old symbolic functions. + # We are probably going to remove this only after the fx exporter is established. + at = aten_op + + @_beartype.beartype + def onnxscript_op( + self, + onnx_fn, + *raw_args: Union[torch.Tensor, _C.Value], + outputs: int = 1, + **kwargs, + ): + """Creates an ONNX operator from onnx-script function, taking "raw_args" as inputs and "kwargs" as attributes. + + onnx-script repository: https://github.com/microsoft/onnx-script + + Args: + onnx_fn: ONNXFunction from onnx-script; An example can be found at + https://github.com/microsoft/onnx-script#example + raw_args: The inputs to the operator; usually provided + as arguments to the `symbolic` definition. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + kwargs: The attributes of the ONNX operator, whose keys are named + according to the following convention: `alpha_f` indicates + the `alpha` attribute with type `f`. The valid type specifiers are + `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute + specified with type float accepts either a single float, or a + list of floats (e.g., you would say `dims_i` for a `dims` attribute + that takes a list of integers). + + Returns: + The value representing the single output of this operator (see the `outputs` + keyword argument for multi-return nodes). + """ + # NOTE(titaiwang): This is using class attributes, and it needs to be updated + # if onnx-script makes any change on these. + symbolic_name = f"{onnx_fn.opset.domain}::{onnx_fn.name}" + opset_version = onnx_fn.opset.version + + registration.custom_onnx_symbolic(symbolic_name, opset_version)(onnx_fn) + + return _add_op(self, symbolic_name, *raw_args, outputs=outputs, **kwargs) + + +@_beartype.beartype +def add_op_with_blocks( + graph_context: GraphContext, + opname: str, + *inputs: _C.Value, + outputs: int = 1, + n_blocks: int = 1, + **attributes, +) -> Tuple[Any, Tuple[GraphContext, ...], _C.Node]: + """Creates an ONNX operator "opname", taking inputs and attributes. + + Args: + graph_context: The context for the current graph. + opname: The ONNX operator name, e.g., `Abs` or `Add`, or an operator qualified + with a namespace, e.g., `aten::add`. + inputs: The inputs to the operator. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + n_blocks: The number of sub-blocks to create in the node. + attributes: The attributes of the ONNX operator. + + Returns: + A tuple of (output_values, new_contexts, node) where: + output_values: One or more output value of this operator + (see the `outputs` keyword argument for multi-return nodes). 
+ new_contexts: A tuple of new graph contexts for each sub-block. + node: The node representing the operator. + """ + + output_values = graph_context.op(opname, *inputs, outputs=outputs, **attributes) + if isinstance(output_values, Sequence): + node = output_values[0].node() + else: + node = output_values.node() + + new_contexts = [] + for _ in range(n_blocks): + new_block = node.addBlock() + # Create shallow copy of the graph context and update the block + new_context = dataclasses.replace(graph_context, block=new_block) + new_contexts.append(new_context) + + return output_values, tuple(new_contexts), node + + +@_beartype.beartype +def _add_op( + graph_context: GraphContext, + opname: str, + *args: Union[torch.Tensor, _C.Value], + outputs: int = 1, + **kwargs, +): + """Creates an ONNX operator "opname", taking "args" as inputs and attributes "kwargs". + + The set of operators and the inputs/attributes they take + is documented at https://github.com/onnx/onnx/blob/master/docs/Operators.md + + This function is monkey-patched onto Graph. + + Args: + graph_context: The Torch Graph or Block. + opname: The ONNX operator name, e.g., `Abs` or `Add`, or an operator qualified + with a namespace, e.g., `aten::add`. + args: The inputs to the operator; usually provided + as arguments to the `symbolic` definition. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + kwargs: The attributes of the ONNX operator, whose keys are named + according to the following convention: `alpha_f` indicates + the `alpha` attribute with type `f`. The valid type specifiers are + `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute + specified with type float accepts either a single float, or a + list of floats (e.g., you would say `dims_i` for a `dims` attribute + that takes a list of integers). + + Returns: + (Union[_C.Value, Tuple[_C.Value, ...]]) + The value representing the single output of this operator (see the `outputs` + keyword argument for multi-return nodes). 
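+
+    Example (an editorial sketch of the attribute-naming convention described
+    above; ``g``, ``x`` and ``indices`` are assumed to exist)::
+
+        # `axis_i=0` sets the integer attribute `axis` on the Gather node.
+        gathered = _add_op(g, "onnx::Gather", x, indices, axis_i=0)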
+ """ + inputs = [_const_if_tensor(graph_context, arg) for arg in args] + # Filter out None attributes, this can be convenient client side because + # now they can pass through None attributes, and have them not show up + attributes = {k: v for k, v in kwargs.items() if v is not None} + + if "::" not in opname: + opname = "onnx::" + opname + + node = _create_node( + graph_context.block, + opname, + inputs, + attributes, + params_dict=graph_context.params_dict, + opset_version=graph_context.opset, + n_outputs=outputs, + shape_inference=GLOBALS.onnx_shape_inference, + ) + + if outputs == 1: + return node.output() + return tuple(node.outputs()) + + +@_beartype.beartype +def _const_if_tensor(graph_context: GraphContext, arg): + if arg is None: + return arg + if isinstance(arg, _C.Value): + return arg + + return _add_op(graph_context, "onnx::Constant", value_z=arg) + + +def _create_node( + graph_or_block: Union[_C.Graph, _C.Block], + domain_op: str, + inputs: Sequence, + attributes: dict, + params_dict: dict, + opset_version: int, + n_outputs: int, + shape_inference: bool = True, +) -> _C.Node: + """Creates an node 'domain_op', taking inputs and attributes.""" + if isinstance(graph_or_block, _C.Graph): + graph = graph_or_block + node = graph.create(domain_op, inputs, n_outputs) + node = graph.insertNode(node) + elif isinstance(graph_or_block, _C.Block): + block = graph_or_block + node = block.addNode(domain_op, inputs) + + # Block does not have create defined, so we need to add outputs manually + if n_outputs > 1: + for _ in range(1, n_outputs): + node.addOutput() + + node_outputs = tuple(node.outputs()) # type: ignore[possibly-undefined] + assert len(node_outputs) == n_outputs + + aten = domain_op.startswith("aten::") + + # Add all attributes + for key, value in sorted(attributes.items()): + if key in _SKIP_NODE_ATTRIBUTES: + continue + _add_attribute(node, key, value, aten=aten) + if shape_inference: + _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version) + return node + + +@_beartype.beartype +def _is_onnx_list(value): + return isinstance(value, Iterable) and not isinstance( + value, (str, bytes, torch.Tensor) + ) + + +@_beartype.beartype +def _scalar(x: torch.Tensor): + """Convert a scalar tensor into a Python value.""" + assert x.numel() == 1 + return x[0] + + +@_beartype.beartype +def _is_caffe2_aten_fallback() -> bool: + return ( + GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + and _C_onnx._CAFFE2_ATEN_FALLBACK + ) + + +@_beartype.beartype +def _add_attribute(node: _C.Node, key: str, value: Any, aten: bool): + r"""Initializes the right attribute based on type of value.""" + m = _ATTR_PATTERN.match(key) + if m is None: + raise ValueError( + f"Invalid attribute specifier '{key}' names " + "must be suffixed with type, e.g. 'dim_i' or 'dims_i'" + ) + name, kind = m.group(1), m.group(2) + if _is_onnx_list(value): + kind += "s" + + if aten and _is_caffe2_aten_fallback(): + if isinstance(value, torch.Tensor): + # Caffe2 proto does not support tensor attribute. + if value.numel() > 1: + raise ValueError("Should not pass tensor attribute") + value = _scalar(value) + if isinstance(value, float): + kind = "f" + else: + kind = "i" + return getattr(node, f"{kind}_")(name, value) + + +# TODO: Expose this to user when migrating symbolic helper functions to here. 
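+# Editorial note: `_add_attribute` above relies on `_ATTR_PATTERN` to split a
+# keyword such as `dims_i` into the attribute name and its type suffix; e.g.
+# `_ATTR_PATTERN.match("dims_i").groups()[:2]` yields `("dims", "i")`.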
+@_beartype.beartype
+def _is_tensor(x: _C.Value) -> bool:
+    return x.type().isSubtypeOf(_C.TensorType.get())
+
+
+@_beartype.beartype
+def get_device_from_value(value: _C.Value) -> Optional[torch.device]:
+    if not _is_tensor(value):
+        return None
+    tensor_type = typing.cast(_C.TensorType, value.type())
+    return tensor_type.device()
+
+
+@_beartype.beartype
+def parse_node_kind(kind: str) -> Tuple[str, str]:
+    """Parse node kind into domain and Op name."""
+    if "::" not in kind:
+        raise ValueError(f"Node kind: {kind} is invalid. '::' is not in node kind.")
+    domain, opname = kind.split("::", 1)
+    if "::" in opname:
+        raise ValueError(f"Node kind: {kind} is invalid. '::' should only appear once.")
+    return domain, opname
+
+
+@_beartype.beartype
+def is_aten(domain: str) -> bool:
+    """Check if the domain is the official `aten` domain."""
+    return domain == "aten"
+
+
+@_beartype.beartype
+def is_prim(domain: str) -> bool:
+    """Check if the domain is the official `prim` domain."""
+    return domain == "prim"
+
+
+@_beartype.beartype
+def is_onnx(domain: str) -> bool:
+    """Check if the domain is the official `onnx` domain."""
+    return domain == "onnx"
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/onnx_proto_utils.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/onnx_proto_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fb79e5b203b4fd4694984acb5cf16318fc48b3d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/onnx_proto_utils.py
@@ -0,0 +1,288 @@
+"""Utilities for manipulating the onnx and onnx-script dependencies and ONNX proto."""
+
+from __future__ import annotations
+
+import glob
+import io
+import os
+import shutil
+import zipfile
+from typing import Any, List, Mapping, Set, Tuple, Union
+
+import torch
+import torch.jit._trace
+import torch.serialization
+from torch.onnx import _constants, _exporter_states, errors
+from torch.onnx._internal import _beartype, jit_utils, registration
+
+
+@_beartype.beartype
+def export_as_test_case(
+    model_bytes: bytes, inputs_data, outputs_data, name: str, dir: str
+) -> str:
+    """Export an ONNX model as a self-contained ONNX test case.
+
+    The test case contains the model and the inputs/outputs data. The directory structure
+    is as follows:
+
+    dir
+    ├── test_<name>
+    │   ├── model.onnx
+    │   └── test_data_set_0
+    │       ├── input_0.pb
+    │       ├── input_1.pb
+    │       ├── output_0.pb
+    │       └── output_1.pb
+
+    Args:
+        model_bytes: The ONNX model in bytes.
+        inputs_data: The inputs data, nested data structure of numpy.ndarray.
+        outputs_data: The outputs data, nested data structure of numpy.ndarray.
+        name: The name of the test case.
+        dir: The directory in which to create the test case.
+
+    Returns:
+        The path to the test case directory.
+    """
+    try:
+        import onnx
+    except ImportError as exc:
+        raise ImportError(
+            "Export test case to ONNX format failed: Please install ONNX."
+        ) from exc
+
+    test_case_dir = os.path.join(dir, "test_" + name)
+    os.makedirs(test_case_dir, exist_ok=True)
+    _export_file(
+        model_bytes,
+        os.path.join(test_case_dir, "model.onnx"),
+        _exporter_states.ExportTypes.PROTOBUF_FILE,
+        {},
+    )
+    data_set_dir = os.path.join(test_case_dir, "test_data_set_0")
+    if os.path.exists(data_set_dir):
+        shutil.rmtree(data_set_dir)
+    os.makedirs(data_set_dir)
+
+    proto = onnx.load_model_from_string(model_bytes)  # type: ignore[attr-defined]
+
+    for i, (input_proto, input) in enumerate(zip(proto.graph.input, inputs_data)):
+        export_data(input, input_proto, os.path.join(data_set_dir, f"input_{i}.pb"))
+    for i, (output_proto, output) in enumerate(zip(proto.graph.output, outputs_data)):
+        export_data(output, output_proto, os.path.join(data_set_dir, f"output_{i}.pb"))
+
+    return test_case_dir
+
+
+@_beartype.beartype
+def load_test_case(dir: str) -> Tuple[bytes, Any, Any]:
+    """Load a self-contained ONNX test case from a directory.
+
+    The test case must contain the model and the inputs/outputs data. The directory structure
+    should be as follows:
+
+    dir
+    ├── test_<name>
+    │   ├── model.onnx
+    │   └── test_data_set_0
+    │       ├── input_0.pb
+    │       ├── input_1.pb
+    │       ├── output_0.pb
+    │       └── output_1.pb
+
+    Args:
+        dir: The directory containing the test case.
+
+    Returns:
+        model_bytes: The ONNX model in bytes.
+        inputs: The inputs data, mapping from input name to numpy.ndarray.
+        outputs: The outputs data, mapping from output name to numpy.ndarray.
+    """
+    try:
+        import onnx
+        from onnx import numpy_helper
+    except ImportError as exc:
+        raise ImportError(
+            "Load test case from ONNX format failed: Please install ONNX."
+        ) from exc
+
+    with open(os.path.join(dir, "model.onnx"), "rb") as f:
+        model_bytes = f.read()
+
+    test_data_dir = os.path.join(dir, "test_data_set_0")
+
+    inputs = {}
+    input_files = glob.glob(os.path.join(test_data_dir, "input_*.pb"))
+    for input_file in input_files:
+        tensor = onnx.load_tensor(input_file)  # type: ignore[attr-defined]
+        inputs[tensor.name] = numpy_helper.to_array(tensor)
+    outputs = {}
+    output_files = glob.glob(os.path.join(test_data_dir, "output_*.pb"))
+    for output_file in output_files:
+        tensor = onnx.load_tensor(output_file)  # type: ignore[attr-defined]
+        outputs[tensor.name] = numpy_helper.to_array(tensor)
+
+    return model_bytes, inputs, outputs
+
+
+@_beartype.beartype
+def export_data(data, value_info_proto, f: str) -> None:
+    """Export data to ONNX protobuf format.
+
+    Args:
+        data: The data to export, nested data structure of numpy.ndarray.
+        value_info_proto: The ValueInfoProto of the data. The type of the ValueInfoProto
+            determines how the data is stored.
+        f: The file to write the data to.
+    """
+    try:
+        from onnx import numpy_helper
+    except ImportError as exc:
+        raise ImportError(
+            "Export data to ONNX format failed: Please install ONNX."
+        ) from exc
+
+    with open(f, "wb") as opened_file:
+        if value_info_proto.type.HasField("map_type"):
+            opened_file.write(
+                numpy_helper.from_dict(data, value_info_proto.name).SerializeToString()
+            )
+        elif value_info_proto.type.HasField("sequence_type"):
+            opened_file.write(
+                numpy_helper.from_list(data, value_info_proto.name).SerializeToString()
+            )
+        elif value_info_proto.type.HasField("optional_type"):
+            opened_file.write(
+                numpy_helper.from_optional(
+                    data, value_info_proto.name
+                ).SerializeToString()
+            )
+        else:
+            assert value_info_proto.type.HasField("tensor_type")
+            opened_file.write(
+                numpy_helper.from_array(data, value_info_proto.name).SerializeToString()
+            )
+
+
+@_beartype.beartype
+def _export_file(
+    model_bytes: bytes,
+    f: Union[io.BytesIO, str],
+    export_type: str,
+    export_map: Mapping[str, bytes],
+) -> None:
+    """Export/write model bytes into a directory, protobuf file, or zip archive."""
+    if export_type == _exporter_states.ExportTypes.PROTOBUF_FILE:
+        assert len(export_map) == 0
+        with torch.serialization._open_file_like(f, "wb") as opened_file:
+            opened_file.write(model_bytes)
+    elif export_type in {
+        _exporter_states.ExportTypes.ZIP_ARCHIVE,
+        _exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE,
+    }:
+        compression = (
+            zipfile.ZIP_DEFLATED
+            if export_type == _exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE
+            else zipfile.ZIP_STORED
+        )
+        with zipfile.ZipFile(f, "w", compression=compression) as z:
+            z.writestr(_constants.ONNX_ARCHIVE_MODEL_PROTO_NAME, model_bytes)
+            for k, v in export_map.items():
+                z.writestr(k, v)
+    elif export_type == _exporter_states.ExportTypes.DIRECTORY:
+        if isinstance(f, io.BytesIO) or not os.path.isdir(f):  # type: ignore[arg-type]
+            raise ValueError(
+                f"f should be a directory when export_type is set to DIRECTORY; instead got type(f): {type(f)}"
+            )
+        if not os.path.exists(f):  # type: ignore[arg-type]
+            os.makedirs(f)  # type: ignore[arg-type]
+
+        model_proto_file = os.path.join(f, _constants.ONNX_ARCHIVE_MODEL_PROTO_NAME)  # type: ignore[arg-type]
+        with torch.serialization._open_file_like(model_proto_file, "wb") as opened_file:
+            opened_file.write(model_bytes)
+
+        for k, v in export_map.items():
+            weight_proto_file = os.path.join(f, k)  # type: ignore[arg-type]
+            with torch.serialization._open_file_like(
+                weight_proto_file, "wb"
+            ) as opened_file:
+                opened_file.write(v)
+    else:
+        raise ValueError("Unknown export type")
+
+
+@_beartype.beartype
+def _add_onnxscript_fn(
+    model_bytes: bytes,
+    custom_opsets: Mapping[str, int],
+) -> bytes:
+    """Insert the custom onnx-script functions used by the model into the ModelProto."""
+    try:
+        import onnx
+    except ImportError as e:
+        raise errors.OnnxExporterError("Module onnx is not installed!") from e
+
+    # For a model larger than 2GB, onnx.load_model_from_string would fail.
+    # However, in _export_onnx the tensors should already have been saved
+    # separately if the proto size exceeds 2GB, and if for some reason they
+    # were not, the model would fail to serialize anyway because of the
+    # protobuf size limit. So we don't need to worry about a > 2GB model
+    # getting here.
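+    # As a rough illustration of that limit (a sketch, not part of this
+    # module), a size guard would look like:
+    #
+    #     if len(model_bytes) >= 2**31:  # protobuf's 2GB serialization limit
+    #         raise RuntimeError(
+    #             "ModelProto exceeds 2GB; save tensors as external data."
+    #         )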
+    model_proto = onnx.load_model_from_string(model_bytes)  # type: ignore[attr-defined]
+
+    # Iterate over graph nodes to insert only the custom
+    # function_protos that the model actually uses into model_proto.
+    onnx_function_list = list()  # type: ignore[var-annotated]
+    included_node_func = set()  # type: Set[str]
+    # onnx_function_list and included_node_func are expanded in-place
+    _find_onnxscript_op(
+        model_proto.graph, included_node_func, custom_opsets, onnx_function_list
+    )
+
+    if onnx_function_list:
+        model_proto.functions.extend(onnx_function_list)
+        model_bytes = model_proto.SerializeToString()
+    return model_bytes
+
+
+@_beartype.beartype
+def _find_onnxscript_op(
+    graph_proto,
+    included_node_func: Set[str],
+    custom_opsets: Mapping[str, int],
+    onnx_function_list: List,
+):
+    """Recursively iterate over the ModelProto to find onnx-script ops, since the graph may contain control-flow ops."""
+    for node in graph_proto.node:
+        node_kind = node.domain + "::" + node.op_type
+        # Recursion is needed for control-flow nodes (If/Loop), which carry an inner graph_proto
+        for attr in node.attribute:
+            if attr.g is not None:
+                _find_onnxscript_op(
+                    attr.g, included_node_func, custom_opsets, onnx_function_list
+                )
+        # Only custom ops with an ONNX function and aten ops with a symbolic_fn should be found in the registry
+        onnx_function_group = registration.registry.get_function_group(node_kind)
+        # Rule out the corner cases: onnx/prim ops in the registry
+        if (
+            node.domain
+            and not jit_utils.is_aten(node.domain)
+            and not jit_utils.is_prim(node.domain)
+            and not jit_utils.is_onnx(node.domain)
+            and onnx_function_group is not None
+            and node_kind not in included_node_func
+        ):
+            specified_version = custom_opsets.get(node.domain, 1)
+            onnx_fn = onnx_function_group.get(specified_version)
+            if onnx_fn is not None:
+                if hasattr(onnx_fn, "to_function_proto"):
+                    onnx_function_proto = onnx_fn.to_function_proto()  # type: ignore[attr-defined]
+                    onnx_function_list.append(onnx_function_proto)
+                    included_node_func.add(node_kind)
+                continue
+
+            raise errors.UnsupportedOperatorError(
+                node_kind,
+                specified_version,
+                onnx_function_group.get_min_supported()
+                if onnx_function_group
+                else None,
+            )
+    return onnx_function_list, included_node_func
diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/onnxruntime.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/onnxruntime.py
new file mode 100644
index 0000000000000000000000000000000000000000..491a053f30282a54c895e7e2cd0c4a95c33dd561
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/onnxruntime.py
@@ -0,0 +1,1199 @@
+import dataclasses
+import importlib
+import logging
+import os
+
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Final,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Union,
+)
+
+from typing_extensions import TypeAlias
+
+import torch
+import torch._C
+import torch._ops
+import torch._prims.executor
+import torch.fx
+from torch._subclasses.fake_tensor import FakeTensor
+from torch.fx._compatibility import compatibility
+from torch.fx.passes.fake_tensor_prop import FakeTensorProp
+from torch.fx.passes.operator_support import OperatorSupport
+from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
+from torch.utils import _pytree
+
+try:
+    # Use try-except to initialize package-dependent global variables.
+    import onnx
+    import onnxruntime  # type: ignore[import]
+    from onnxruntime.capi import _pybind_state as ORTC  # type: ignore[import]
+
+    # onnxscript is not used directly in DORT but is needed by the underlying
+    # exporter, so we still need to check that it exists.
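+    # import_module raises ImportError when onnxscript is missing, which the
+    # except-clause below converts into _SUPPORT_ONNXRT = False. An
+    # equivalent, non-raising probe (a sketch; requires `import importlib.util`)
+    # would be:
+    #
+    #     if importlib.util.find_spec("onnxscript") is None:
+    #         raise ImportError("onnxscript is required by the exporter")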
+ importlib.import_module("onnxscript") + + import torch.onnx + import torch.onnx._internal + import torch.onnx._internal.diagnostics + import torch.onnx._internal.exporter + import torch.onnx._internal.fx.decomposition_table + import torch.onnx._internal.fx.passes + from torch.onnx._internal.fx import fx_onnx_interpreter + from torch.onnx._internal.fx.type_utils import ( + _TORCH_DTYPE_TO_NUMPY_DTYPE, + _TORCH_DTYPE_TO_ONNX_TENSOR_ELEMENT_TYPE, + from_python_type_to_onnx_tensor_element_type, + ) + + _SUPPORT_ONNXRT = True +except ImportError: + _SUPPORT_ONNXRT = False + +__all__ = [ + "is_onnxrt_backend_supported", + "torch_compile_backend", + "OrtExecutionProvider", + "OrtBackendOptions", + "OrtBackend", +] + + +def is_onnxrt_backend_supported() -> bool: + """Returns ``True`` if ONNX Runtime dependencies are installed and usable + to support TorchDynamo backend integration; ``False`` otherwise. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> if torch.onnx.is_onnxrt_backend_supported(): + ... @torch.compile(backend="onnxrt") + ... def f(x): + ... return x * x + ... print(f(torch.randn(10))) + ... else: + ... print("pip install onnx onnxscript onnxruntime") + ... + """ + return _SUPPORT_ONNXRT + + +_dumped_onnx_model: Dict[str, int] = {} + + +def _dump_onnx_model( + model_string: bytes, graph_module: Optional[torch.fx.GraphModule] = None +) -> str: + """Stores the onnx model into a file. + The name is "{ONNXRT_DUMP_PATH}{N}.onnx" + where *N* is the number of files already stored with + this prefix. + If graph_module is not None, the graph is stored as a string with + the same filename except the extension (.txt). + """ + prefix = os.environ.get("ONNXRT_DUMP_PATH", None) + if not prefix: + return "" + n = _dumped_onnx_model.get(prefix, -1) + 1 + filename = f"{prefix}{n}.onnx" + with open(filename, "wb") as f: + f.write(model_string) + _dumped_onnx_model[prefix] = n + if graph_module is not None: + filename_txt = f"{prefix}{n}.txt" + with open(filename_txt, "w", encoding="utf-8") as f: + f.write(str(graph_module.graph)) + return filename + + +def _infer_default_eps() -> Sequence[str]: + # TODO: select a good default based on the capabilities of the host + # e.g. DML on Windows, etc. + return ["CPUExecutionProvider"] + + +def _nvtx_range_push(name: str): + """If PyTorch is installed with CUDA support, this starts NVTX range. + + Check torch.cuda.nvtx.range_push's document for more details. + """ + if torch.cuda.is_available(): + torch.cuda.nvtx.range_push(name) + + +def _nvtx_range_pop(): + """If PyTorch is installed with CUDA support, this terminates NVTX range. + + Check torch.cuda.nvtx.range_pop's document for more details. + """ + if torch.cuda.is_available(): + torch.cuda.nvtx.range_pop() + + +def _get_ort_device_type(device_type: str): + if device_type == "cuda": + return ORTC.OrtDevice.cuda() + if device_type == "cpu": + return ORTC.OrtDevice.cpu() + # ort pytorch device is mapped to NPU OrtDevice type + if device_type == "ort": + return ORTC.OrtDevice.npu() + raise ValueError("Unsupported device type: " + device_type) + + +logger = logging.getLogger(__name__) +# Uncomment the following lines to print out development info. +# logging.basicConfig(level=logging.WARNING) +# logger.setLevel(logging.WARNING) + + +class OrtOperatorSupport(OperatorSupport): + """Operator support for ONNXRuntime backend. + + It has two-level of support decision. One is via support_dict and the other one + is via extra_support_dict. 
The logic of using support_dict is implemented in
+    OrtOperatorSupport, and extra_support_dict is consumed by OperatorSupport.is_node_supported.
+    """
+
+    def __init__(self, support_dict: Set[Any], extra_support_dict: Dict[str, Any]):
+        # Use extra_support_dict[op_name] = None to indicate
+        # that we support op_name with all input types. Otherwise,
+        # see support_dict (type: SupportDict) in operator_support.py
+        # for specifying supported types.
+        super().__init__(extra_support_dict)
+        self._onnx_support_dict = support_dict
+
+    def is_node_supported(
+        self, submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node
+    ) -> bool:
+        # OperatorSupport.is_node_supported returns True for non-callable nodes.
+        # Since ORT can't execute them, we return False here to override the base
+        # behavior.
+        if node.op not in CALLABLE_NODE_OPS:
+            return False
+        # This is the only place to decide if an aten op is supported.
+        if node.op == "call_function" and node.target in self._onnx_support_dict:
+            logger.warning(
+                "support_dict supports node.target: %s (type: %s)",
+                node.target,
+                type(node.target),
+            )
+            return True
+        # If node.target is not in support_dict, we still want to check if torch.jit.script
+        # can convert it to an ONNX equivalent. Let's use the base mechanism to do this.
+        # See extra_support_dict for supported ops.
+        if super().is_node_supported(submodules, node):
+            logger.warning(
+                "extra_support_dict supports node.target: %s (type: %s)",
+                node.target,
+                type(node.target),
+            )
+            return True
+        logger.warning(
+            "support_dict and extra_support_dict don't support node.target: %s (type: %s)",
+            node.target,
+            type(node.target),
+        )
+        return False
+
+
+def _move_placeholder_to_front(graph_module: torch.fx.GraphModule) -> None:
+    """
+    In torch.fx.Graph, placeholder is a special assignment node. If it's not
+    executed in the beginning, it could overwrite values computed by upstream
+    nodes.
+    """
+
+    graph = graph_module.graph
+    placeholders = []
+    first_not_placeholder = None
+    for node in graph.nodes:
+        if node.op == "placeholder":
+            placeholders.append(node)
+        if first_not_placeholder is None and node.op != "placeholder":
+            first_not_placeholder = node
+    if first_not_placeholder is None:
+        return
+    for placeholder in placeholders:
+        first_not_placeholder.prepend(placeholder)
+
+
+def _infer_ep_from_device(*args) -> Tuple[str, ...]:
+    """Return an execution provider for each valid device (i.e., GPU or CPU) in the argument list."""
+    eps = []
+    for arg in args:
+        if hasattr(arg, "device"):
+            device = arg.device
+            if device.type == "cuda":
+                eps.append("CUDAExecutionProvider")
+            elif device.type == "cpu":
+                eps.append("CPUExecutionProvider")
+    return tuple(eps)
+
+
+def _extract_graph_module_inputs(graph_module: torch.fx.GraphModule) -> Tuple[Any, ...]:
+    placeholders = []
+    for node in graph_module.graph.nodes:
+        if node.op == "placeholder":
+            if hasattr(node, "meta") and "val" in node.meta:
+                assert isinstance(node.meta["val"], torch.Tensor)
+            placeholders.append(node)
+    return tuple(placeholders)
+
+
+def _extract_graph_module_outputs(graph_module: torch.fx.GraphModule) -> Any:
+    """Collect "val" fields from the outputs' metadata in this torch.fx.GraphModule."""
+    for node in graph_module.graph.nodes:
+        if node.op == "output":
+            # The output node is unique. Let's retrieve the output values from
+            # this node's input list, then simply return them.
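+            # As an illustration (a sketch, assuming standard torch.fx tracing),
+            # for `def f(x): return x + 1` the traced graph contains
+            # [placeholder x, call_function add, output], and args[0] of the
+            # output node refers to the `add` node:
+            #
+            #     gm = torch.fx.symbolic_trace(f)
+            #     out_node = next(n for n in gm.graph.nodes if n.op == "output")
+            #     out_node.args[0]  # -> the `add` node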
+ return node.args[0] + raise ValueError("No output node found in this torch.fx.GraphModule.") + + +def _infer_ep_from_graph_module(graph_module: torch.fx.GraphModule) -> Tuple[str, ...]: + """Return the all valid devices (i.e., GPU or CPU) among outputs of this torch.fx.GraphModule.""" + flattened_output_args, _ = _pytree.tree_flatten( + _extract_graph_module_outputs(graph_module) + ) + # Output arguments with example value (type: torch.Tensor) in the `graph_module`. + selected_output_args = [ + output_arg.meta["val"] + for output_arg in flattened_output_args + # output_arg must have tensor for its device information. + # Otherwise, skip it. + if (hasattr(output_arg, "meta") and "val" in output_arg.meta) + ] + return _infer_ep_from_device(*selected_output_args) + + +def _sort_eps(eps: Tuple[str, ...]) -> Tuple[str, ...]: + """Sort execution providers in eps based on pre-set priority.""" + + def get_execution_provider_priority(ep: str) -> int: + if ep == "CPUExecutionProvider": + # Lowest priority. + return 2 + if ep == "CUDAExecutionProvider": + # Higher priority than CPU but lower than + # other specialized EPs. + return 1 + # Highest priority. + return 0 + + unique_eps = set(eps) + return tuple(sorted(unique_eps, key=get_execution_provider_priority, reverse=True)) + + +def _get_onnx_devices( + values: Tuple[ + Union[ + torch.Tensor, torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool + ], + ..., + ] +) -> Tuple["ORTC.OrtDevice", ...]: + def _device_id_or_zero(device_id: int) -> int: + return device_id or 0 + + def _map_tensor_or_sym_to_device( + value: Union[ + torch.Tensor, torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool + ], + ) -> int: + if isinstance(value, torch.Tensor): + return ORTC.OrtDevice( + _get_ort_device_type(value.device.type), + ORTC.OrtDevice.default_memory(), + _device_id_or_zero(value.device.index), + ) + elif isinstance( + value, (torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool) + ): + return ORTC.OrtDevice( + _get_ort_device_type("cpu"), ORTC.OrtDevice.default_memory(), 0 + ) + else: + raise ValueError("Unsupported value type: " + str(type(value))) + + if len(values) > 0: + ort_devices = tuple(_map_tensor_or_sym_to_device(value) for value in values) + return ort_devices + else: + return (_map_tensor_or_sym_to_device(1),) + + +def _get_ortvalues_from_torch_tensors( + tensors: Tuple[torch.Tensor, ...], devices: Tuple["ORTC.OrtDevice", ...] +) -> Tuple[torch.Tensor, ...]: + ortvalues = ORTC.OrtValueVector() + ortvalues.reserve(len(tensors)) + dtypes = [] + shapes = [] + data_ptrs = [] + + for tensor in tensors: + dtypes.append(_TORCH_DTYPE_TO_NUMPY_DTYPE[tensor.dtype]) + shapes.append(tensor.size()) + data_ptrs.append(tensor.data_ptr()) + ortvalues.push_back_batch(tensors, data_ptrs, dtypes, shapes, devices) + return ortvalues + + +def _to_real_tensor(tensor: FakeTensor) -> torch.Tensor: + if tensor.is_sparse: + raise ValueError("sparse tensor is not yet supported.") + out = torch.empty(tensor.size(), dtype=tensor.dtype, device=tensor.device) + return out + + +def _adjust_scalar_from_fx_to_onnx( + dynamo_value: Union[ + torch.Tensor, + int, + float, + bool, + ], + value_info: "onnx.ValueInfoProto", # type: ignore[name-defined] +) -> torch.Tensor: + """Helper function to wrap PyTorch variables as torch.Tensor""" + if ( + isinstance(dynamo_value, torch.Tensor) + and len(value_info.type.tensor_type.shape.dim) == 0 + and dynamo_value.shape == (1,) + ): + # ONNX expect a scalar with empty shape. 
+ # In contrast, PyTorch usually allows implicit + # conversion between shape=() and shape=(1,). + # + # Below, PyTorch's shape (1,) is reshaped to (). + return torch.squeeze(dynamo_value) + elif isinstance(dynamo_value, int): + return torch.tensor(dynamo_value, dtype=torch.int64) + elif isinstance(dynamo_value, float): + return torch.tensor(dynamo_value, dtype=torch.float32) + elif isinstance(dynamo_value, bool): + return torch.tensor(dynamo_value, dtype=torch.bool) + else: + assert isinstance(dynamo_value, torch.Tensor) + return dynamo_value.contiguous() + + +def _adjust_scalar_from_onnx_to_fx( + tensor: torch.Tensor, + prim_value: Union[ + torch.Tensor, + torch.SymInt, + int, + torch.SymFloat, + float, + torch.SymBool, + bool, + ], +) -> Union[torch.Tensor, int, float, bool,]: + """Helper function to wrap ORT-produced torch.Tensor as PyTorch variables""" + assert isinstance(tensor, torch.Tensor), "ORT's output must be tensor." + if isinstance( + prim_value, + (torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool), + ): + # Convert tensor back to scalar to match Dynamo's expectation. + return tensor.item() + return tensor + + +def _run_onnx_session_with_ortvaluevector( + sess: "onnxruntime.InferenceSession", + input_names: Tuple[str, ...], + inputs: Tuple[torch.Tensor, ...], + input_devices: Tuple["ORTC.OrtDevice", ...], + output_names: Tuple[str, ...], + outputs: Tuple[torch.Tensor, ...], + output_devices: Tuple["ORTC.OrtDevice", ...], + preallocate_output: bool, + input_value_infos: Tuple["onnx.ValueInfoProto", ...], # type: ignore[name-defined] + normalized_prim_outputs: Tuple[ + Union[ + torch.Tensor, torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool + ], + ..., + ], +) -> Tuple[Union[torch.Tensor, int, float, bool], ...]: + _nvtx_range_push("contiguous") + inputs = tuple( + _adjust_scalar_from_fx_to_onnx(arg, value_info) + for arg, value_info in zip(inputs, input_value_infos) + ) + _nvtx_range_pop() + + _nvtx_range_push("push_back_batch") + ort_inputs = _get_ortvalues_from_torch_tensors(inputs, input_devices) + + # preallocate output pytorch Tensors and use the buffers affined to the torch device for the output ortvalue. + # Because the output ortvalue is not allocated and owned by ort, it does not need to convert the output ortvalue + # to torch Tensor transferring the ownership. + if preallocate_output: + pth_outputs = tuple( + _to_real_tensor(t) if isinstance(t, FakeTensor) else t for t in outputs + ) + ort_outputs = _get_ortvalues_from_torch_tensors(pth_outputs, output_devices) + else: + ort_outputs = ORTC.OrtValueVector() + _nvtx_range_pop() + + _nvtx_range_push("run_with_ortvaluevector") + run_options = onnxruntime.RunOptions() + run_options.add_run_config_entry("disable_synchronize_execution_providers", "1") + sess.run_with_ortvaluevector( + run_options, input_names, ort_inputs, output_names, ort_outputs, output_devices + ) + _nvtx_range_pop() + + # Post-processing step: + # wrap ORT's outputs to the schema represented by + # `prim_output` (obtained by running the original + # torch.fx.GraphModule). + if preallocate_output: + # Profile the ORT-to-PyTorch type cast below + _nvtx_range_push("after run_with_ortvaluevector") + # Outputs are stored on pre-allocated torch.Tensors' memory, + # so this case doesn't need to convert ORTValue to torch.Tensor. 
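+        # E.g., if a prim_output is the Python scalar 3, its matching entry in
+        # pth_outputs arrives as tensor(3); _adjust_scalar_from_onnx_to_fx (see
+        # above) maps it back to 3 via Tensor.item(). This is only an
+        # illustration of the normalization performed below.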
+ pth_outputs = tuple( + _adjust_scalar_from_onnx_to_fx(onnx_output, prim_output) # type: ignore[misc] + for onnx_output, prim_output in zip(pth_outputs, normalized_prim_outputs) + ) + _nvtx_range_pop() + return pth_outputs + else: + # Profile the two ORT-to-PyTorch type casts below + _nvtx_range_push("after run_with_ortvaluevector") + # Map ORTValue to torch.Tensor. + pth_outputs = onnxruntime.training.ortmodule._utils._ortvalues_to_torch_tensor( + ort_outputs + ) + # Change some torch.Tensor to int, float, bool. + pth_outputs = tuple( + _adjust_scalar_from_onnx_to_fx(onnx_output, prim_output) # type: ignore[misc] + for onnx_output, prim_output in zip(pth_outputs, normalized_prim_outputs) + ) + _nvtx_range_pop() + return pth_outputs + + +def _run_onnx_session_with_fetch( + sess: "onnxruntime.InferenceSession", + input_names: Tuple[str, ...], + inputs: Tuple[torch.Tensor, ...], + input_devices: Tuple["ORTC.OrtDevice", ...], + output_names: Tuple[str, ...], + outputs: Tuple[torch.Tensor, ...], + output_devices: Tuple["ORTC.OrtDevice", ...], + preallocate_output: bool, + input_value_infos: Tuple["onnx.ValueInfoProto", ...], # type: ignore[name-defined] + normalized_prim_outputs: Tuple[ + Union[ + torch.Tensor, torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool + ], + ..., + ], +) -> Tuple[Union[torch.Tensor, int, float, bool], ...]: + inputs = tuple( + _adjust_scalar_from_fx_to_onnx(arg, value_info) + for arg, value_info in zip(inputs, input_value_infos) + ) + feed = { + name: onnxruntime.OrtValue.ortvalue_from_numpy(tensor.cpu().numpy()) + for name, tensor in zip(input_names, inputs) + } + ort_outputs = sess.run(output_names, feed) + pth_outputs = tuple( + _adjust_scalar_from_onnx_to_fx( + torch.from_numpy(value), + prim_output, + ) + for value, prim_output in zip(ort_outputs, normalized_prim_outputs) + ) + return pth_outputs + + +class OrtExecutionInfoPerSession: + """Information required to execute torch.fx.GraphModule using onnxruntime.InferenceSession""" + + def __init__( + self, + session: "onnxruntime.InferenceSession", + input_names: Tuple[str, ...], + input_value_infos: Tuple["onnx.ValueInfoProto", ...], # type: ignore[name-defined] + output_names: Tuple[str, ...], + output_value_infos: Tuple["onnx.ValueInfoProto", ...], # type: ignore[name-defined] + input_devices: Tuple["ORTC.OrtDevice", ...], + output_devices: Tuple["ORTC.OrtDevice", ...], + example_outputs: Union[Tuple[torch.Tensor, ...], torch.Tensor], + ): + # Carrier of ONNX model and its executor. + self.session: onnxruntime.InferenceSession = session + # For the ONNX model stored in self.session, self.input_names[i] is the + # name of the i-th positional input. + self.input_names: Tuple[str, ...] = input_names + # self.input_name[i]'s type information is stored in self.input_value_infos[i]. + self.input_value_infos: Tuple[onnx.ValueInfoProto, ...] = input_value_infos # type: ignore[name-defined] + # Similar to self.input_names, but for outputs. + self.output_names: Tuple[str, ...] = output_names + # Similar to self.input_value_infos but for outputs. + self.output_value_infos: Tuple[onnx.ValueInfoProto, ...] = output_value_infos # type: ignore[name-defined] + # For the ONNX model stored in self.session, self.input_devices[i] is the + # i-th positional input's device. + self.input_devices: Tuple["ORTC.OrtDevice", ...] = input_devices + # Similar to self.input_devices, but for outputs. + self.output_devices: Tuple["ORTC.OrtDevice", ...] 
= output_devices + # This is the outputs of executing the original torch.fx.GraphModule with example inputs + # (i.e., args passed into OrtBackend._ort_acclerated_call). + self.example_outputs: Union[ + Tuple[torch.Tensor, ...], torch.Tensor + ] = example_outputs + + def is_supported(self, *args): + # Compare the args and the input schema in ONNX model and + # return the first match. + if len(args) != len(self.input_value_infos): + return False + for arg, value_info in zip(args, self.input_value_infos): + if not isinstance(arg, (torch.Tensor, float, int)): + return False + + # Check Python scalars such as int, float, and bool. + if isinstance(arg, (int, float, bool)): + # Map, e.g., float to onnx.TensorProto.FLOAT. + onnx_dtype = from_python_type_to_onnx_tensor_element_type(type(arg)) + if onnx_dtype != value_info.type.tensor_type.elem_type: + return False + if len(value_info.type.tensor_type.shape.dim) != 0: + return False + continue + + # Check tensor. + onnx_dtype = _TORCH_DTYPE_TO_ONNX_TENSOR_ELEMENT_TYPE[arg.dtype] + if onnx_dtype != value_info.type.tensor_type.elem_type: + return False + for dim, onnx_dim in zip(arg.shape, value_info.type.tensor_type.shape.dim): + if isinstance(dim, int) and ( + onnx_dim.dim_value == dim or onnx_dim.dim_param + ): + continue + elif isinstance(dim, torch.SymInt) and onnx_dim.dim_param: + continue + else: + return False + return True + + +@dataclasses.dataclass +class OrtExecutionInfoForAllGraphModules: + def __init__(self): + # All sessions (and their related information) created by exporting the same GraphModule + # with different inputs. + self.execution_info_per_graph_module: Dict[ + torch.fx.GraphModule, List[OrtExecutionInfoPerSession] + ] = {} + + def search_reusable_session_execution_info( + self, graph_module: torch.fx.GraphModule, *args + ): + if graph_module not in self.execution_info_per_graph_module: + return None + # All execution information for ONNX models exported from the same `graph_module` + # with different inputs. + candidates = self.execution_info_per_graph_module[graph_module] + + for candidate in candidates: + if candidate.is_supported(*args): + # Returns the first session that accepts this input schema. + return candidate + # No reusable session found. + return None + + def cache_session_execution_info( + self, graph_module: torch.fx.GraphModule, info: OrtExecutionInfoPerSession + ): + if graph_module not in self.execution_info_per_graph_module: + self.execution_info_per_graph_module[graph_module] = [info] + else: + self.execution_info_per_graph_module[graph_module].append(info) + + +OrtExecutionProvider: TypeAlias = Union[str, Tuple[str, Mapping[str, Any]]] +"""Either the name of an ONNX Runtime execution provider as a string or +a 2-tuple of the name and a dictionary of execution provider options. + +Examples:: + + >>> "CPUExecutionProvider" + + >>> ("CUDAExecutionProvider", {"device_id": 3}) + +""" + + +@dataclasses.dataclass(frozen=True) +@compatibility(is_backward_compatible=False) +class OrtBackendOptions: + """Options for constructing an ``OrtBackend``, the ONNX Runtime + backend (``"onnxrt"``) for ``torch.compile``. + + Example:: + + >>> @torch.compile( + ... backend="onnxrt", + ... options=torch.onnx._OrtBackendOptions(...), + ... ) + ... def ort_function(x): + ... 
return x ** x
+    """
+
+    preferred_execution_providers: Optional[Sequence[OrtExecutionProvider]] = None
+    """An optional sequence of execution providers to be prioritized ahead of any
+    execution providers that may be inferred (see ``infer_execution_providers``).
+    """
+
+    infer_execution_providers: bool = True
+    """Whether to infer an execution provider from ``torch.device`` bound to inputs or found in the graph."""
+
+    default_execution_providers: Optional[Sequence[OrtExecutionProvider]] = None
+    """The default fallback execution providers. If not specified, one will be
+    selected based on the host environment (most likely ``"CPUExecutionProvider"``).
+    """
+
+    # preallocate_output allows for allocating output torch Tensor buffers and feeding them to InferenceSession
+    # in order to avoid internal allocation of output buffers in InferenceSession.
+    # If the output ortvalue returned from InferenceSession is allocated internally,
+    # it needs to be converted to a torch Tensor for return, and that torch Tensor should take ownership.
+    # When a custom torch device is used with a custom aten allocator, the conversion from ortvalue to torch Tensor
+    # should be supported, which is currently done through dlpack. Note that dlpack might not support a custom torch device.
+    # This can be avoided by preallocating the output buffers with the custom aten allocator and
+    # feeding those preallocated buffers to InferenceSession, which then does not take any ownership of them.
+    # TODO(wschin): Make it an inference-session-level flag.
+    # See https://github.com/pytorch/pytorch/issues/106869.
+    preallocate_output: bool = False
+    """If ``True``, allocate memory for ONNX Runtime's outputs on the PyTorch side."""
+
+    use_aot_autograd: bool = True
+    """Whether to wrap the ``OrtBackend`` with TorchDynamo's aot_autograd backend
+    to support training (i.e., backward graphs are also sent to ``OrtBackend``).
+
+    Symbolic execution is used to capture the forward pass and backward passes as a single graph.
+    Then, a selected graph partition algorithm (``min_cut_rematerialization_partition``) is used
+    to split the entire graph into forward sub-graph and backward sub-graph. Finally, both
+    sub-graphs are compiled by ``OrtBackend``.
+    """
+
+    export_options: Optional["torch.onnx.ExportOptions"] = None
+    """Options for the TorchDynamo-based ONNX exporter used by the ``OrtBackend``."""
+
+    ort_session_options: Optional["onnxruntime.SessionOptions"] = None
+    """Options for the ``onnxruntime.InferenceSession`` used by the ``OrtBackend``."""
+
+    pre_ort_model_transforms: Optional[  # type: ignore[name-defined]
+        Sequence[Callable[["onnx.ModelProto"], None]]
+    ] = None
+    """A list of graph transforms to be applied to the ONNX model before it
+    is fed to ONNXRuntime's InferenceSession."""
+
+
+@compatibility(is_backward_compatible=False)
+class OrtBackend:
+    """A backend that compiles (sub-)graphs in a torch.fx.GraphModule to onnxruntime.InferenceSession calls.
+
+    The compiler entry point is OrtBackend.compile, which
+    1. partitions the original graph into supported sub-graphs (type: torch.fx.GraphModule) and unsupported
+       sub-graphs.
+    2. For each supported sub-graph, replaces its _wrapped_call function with _ort_accelerated_call.
+    3. Inside _ort_accelerated_call, creates an onnxruntime.InferenceSession and calls it to execute the sub-graph.
+ """ + + def __init__(self, options: Optional[OrtBackendOptions] = None): + self._options: Final = OrtBackendOptions() if options is None else options + + # options.export_options contains information shared between exporter and DORT. + # For example, they should use the same decomposition table when + # 1. capturing FX graph in torch.compile (see how we create aot_ort in register_backend.py) + # 2. call exporter's API to convert `torch.fx.GraphModule` to ONNX model + # (see onnxfunction_dispatcher passed to FxOnnxInterpreter.run below). + # + # Convert user-facing option to internal option used by ONNX exporter + # to access required information. + # Some useful fields: + # - Decomposition table for decomposing FX operators in exporter is + # self._resolved_onnx_exporter_options.decomposition_table. + # - self._resolved_onnx_exporter_options.onnx_registry records what + # aten/prim ops are supported by exporter and their exporters (type: callable). + self._resolved_onnx_exporter_options = ( + torch.onnx._internal.exporter.ResolvedExportOptions( + torch.onnx.ExportOptions() + if self._options.export_options is None + else self._options.export_options + ) + ) + + # Given DORT's computation flow: + # 1. OrtOperatorSupport uses support_dict and extra_support_dict to select operators + # and send them to DORT. + # 2. Then, DORT exports the selected sub-graphs into ONNX. + # 3. Finally DORT calls ORT to do the computation. + # OrtOperatorSupport and create_onnx_friendly_decomposition_table(...) + # must use the same support_dict. If the support_dict here contains something not + # supported by exporter, exporter will fails in step 2 since the selected graphs may + # contains unsupported operators such as aten::_who_you_are. + # This restriction is automatically done since DORT and exporter shares the same + # self._resolved_onnx_exporter_options. + support_dict = torch.onnx._internal.fx.decomposition_table._create_onnx_supports_op_overload_table( + self._resolved_onnx_exporter_options.onnx_registry + ) + + extra_support_dict: Dict[str, Any] = { + "getattr": None, + # To send operator.getitem to ORT, add the corresponding string + # recognized by PyTorch's OperatorSupport class. + "_operator.getitem": None, + # To send operator.mul to ORT, add the corresponding string + # recognized by PyTorch's OperatorSupport class. + "_operator.mul": None, + "_operator.add": None, + "_operator.sub": None, + } + + self._supported_ops = OrtOperatorSupport(support_dict, extra_support_dict) + # TODO(wschin): this is a naive implementation of cache without proper guard + # See https://github.com/pytorch/pytorch/issues/106868. + self._partitioner_cache: Dict[torch.fx.GraphModule, torch.fx.GraphModule] = {} + # Conceptually, this filed is a 2-layer dictionary + # GraphModule 0 + # ONNX Model 0 (with ORT InferenceSession and related information. type: OrtExecutionInfoPerSession) + # ONNX Model 1 + # ... + # GraphModule 1 + # ONNX Model 2 (with ORT InferenceSession and related information. type: OrtExecutionInfoPerSession) + # ONNX Model 3 + # ... + # ... + # , which caches all previous compilation result so that we can reuse them. + # ONNX Model 0 and 1 are exported from the same GraphModule 0 but with different inputs + # (e.g., tensors with different ranks). GraphModule 0 and GraphModule 1 are different + # graphs captured by Dynamo and sent to OrtBackend.compile. 
+        self._all_ort_execution_info = OrtExecutionInfoForAllGraphModules()
+
+        self._assert_allclose_to_baseline = False
+
+        self.execution_count = 0
+
+        # Function which invokes ORT to do the real computation.
+        self.run = (
+            _run_onnx_session_with_ortvaluevector
+            if hasattr(ORTC.OrtValueVector, "push_back_batch")
+            else _run_onnx_session_with_fetch
+        )
+
+    def _select_eps(
+        self, graph_module: torch.fx.GraphModule, *args
+    ) -> Sequence[Tuple[str, Mapping[str, Any]]]:
+        inferred_eps: Tuple[str, ...] = tuple()
+        if self._options.infer_execution_providers:
+            if eps_from_args := _infer_ep_from_device(*args):
+                # If the user feeds a CUDA tensor as an input argument,
+                # we want to use the CUDA EP.
+                # Thus, `eps_from_args` (deduced from the input arguments)
+                # has the highest priority.
+                inferred_eps = eps_from_args
+            elif eps_from_graph_module := _infer_ep_from_graph_module(graph_module):
+                # If there is no EP in the input arguments, we deduce the EP from
+                # graph_module's outputs. Those outputs may come from
+                # FakeTensorProp or Dynamo's built-in symbolic shape inference.
+                inferred_eps = eps_from_graph_module
+
+        selected_eps = []
+
+        for ep in (
+            *(self._options.preferred_execution_providers or []),
+            *_sort_eps(inferred_eps),
+            *(self._options.default_execution_providers or _infer_default_eps()),
+        ):
+            if isinstance(ep, str):
+                ep = (ep, {})
+            elif isinstance(ep, tuple) and ep[1] is None:
+                ep = (ep[0], {})
+            if ep is not None and ep not in selected_eps:
+                selected_eps.append(ep)
+
+        return selected_eps
+
+    def _ort_acclerated_call(self, graph_module: torch.fx.GraphModule, *args, **kwargs):
+        """This function replaces GraphModule._wrapped_call in the compiled model.
+
+        The _wrapped_call is the underlying implementation of the forward method. Replacing
+        it means we delegate the computation to _ort_acclerated_call and therefore to
+        onnxruntime.InferenceSession.
+        """
+        cached_execution_info_per_session = (
+            self._all_ort_execution_info.search_reusable_session_execution_info(
+                graph_module, *args
+            )
+        )
+        if cached_execution_info_per_session:
+            onnx_session = cached_execution_info_per_session.session
+            input_names = cached_execution_info_per_session.input_names
+            output_names = cached_execution_info_per_session.output_names
+            input_value_infos = cached_execution_info_per_session.input_value_infos
+            output_value_infos = cached_execution_info_per_session.output_value_infos
+            input_devices = cached_execution_info_per_session.input_devices
+            output_devices = cached_execution_info_per_session.output_devices
+            prim_outputs = cached_execution_info_per_session.example_outputs
+        else:
+            # It's the first time seeing such a graph. Let's make a new session
+            # (type: onnxruntime.InferenceSession) for it.
+
+            graph_module = torch.onnx._internal.fx.passes.MovePlaceholderToFront(
+                self._resolved_onnx_exporter_options.diagnostic_context,
+                graph_module,
+            ).run()
+            # Generate reference outputs. They are used to indicate the output
+            # tensors' types and devices when calling ORT.
+            #
+            # WARNING: The downstream code should not change prim_outputs, and
+            # this backend should always produce output with a schema identical to prim_outputs'.
+
+            if self._resolved_onnx_exporter_options.dynamic_shapes:
+                # No pre-allocation when dynamic shape is enabled.
+                self.preallocate_output = False
+                extracted_outputs = _extract_graph_module_outputs(graph_module)
+
+                def maybe_map_to_meta_val(value):
+                    if hasattr(value, "meta") and "val" in value.meta:
+                        # Select outputs with "val" information.
Without "val", + # it's not possible access output_arg.meta["val"].device. + return value.meta["val"] + else: + return value + + prim_outputs = _pytree.tree_map( + maybe_map_to_meta_val, extracted_outputs + ) + else: + try: + prim_outputs = FakeTensorProp(graph_module).propagate( + *args, **kwargs + ) + except Exception: + logger.warning("FakeTensorProb failed for %s", graph_module) + # When FakeTensorProp fails, it is not possible to preallocate output buffers + # because the output shapes are not inferred. + self.preallocate_output = False + + # rethrow FakeTensorProb failure because it is not yet currently handled. + raise + + # Create the object to iterate through the nodes in graph one-by-one + # and calls the corresponding ONNX exporter for each node. + fx_interpreter = fx_onnx_interpreter.FxOnnxInterpreter( + diagnostic_context=self._resolved_onnx_exporter_options.diagnostic_context + ) + # Cast FX variables if they will result schema-mismatch when searching + # for ONNX operator. E.g., add(double_tensor, int_tensor) is fine in PyTorch, + # but ONNX expects add(double_tensor, double_tensor). + graph_module = torch.onnx._internal.fx.passes.InsertTypePromotion( + self._resolved_onnx_exporter_options.diagnostic_context, graph_module + ).run() + # Start the per-node exporting process. It's conceptually a for loop + # scanning through the nodes in the graph. + exported = fx_interpreter.run( + fx_graph_module=graph_module, + onnxfunction_dispatcher=self._resolved_onnx_exporter_options.onnxfunction_dispatcher, + op_level_debug=self._resolved_onnx_exporter_options.op_level_debug, + ) + # Convert the exported result to ONNX ModelProto. + onnx_model = exported.to_model_proto( + opset_version=self._resolved_onnx_exporter_options.onnx_registry.opset_version, + ) + + # Modify ONNX model using pre-registered graph transforms. + # They are in-place modifications for avoiding unnecessary + # copy of ONNX initializers. + if self._options.pre_ort_model_transforms: + for transform in self._options.pre_ort_model_transforms: + transform(onnx_model) + + onnx_model_bytes = onnx_model.SerializeToString() + if os.environ.get("ONNXRT_DUMP_PATH", None): + # If not empty, environment variable ONNXRT_DUMP_PATH defined the path + # where generated onnx files should be stored. + # This module keeps a global variables keeping track of the + # stored models. + # If ONNXRT_DUMP_PATH="dumped/dumped_model_" + # The first file name will be 'dumped/dumped_model_0.onnx'. + # For every dumped model, a text file 'dumped/dumped_model_0.txt' + # is created as well to contain the string representing the graph_module. + _dump_onnx_model(onnx_model_bytes, graph_module=graph_module) + + # Initialize a ORT session to execute this ONNX model. + # Note that TorchDynamo assumes all inputs/outputs are on the + # same device, but it's subject to change (very likely with + # dynamic shape support), so we add execution providers + # based on the logic in _select_eps: (explicitly preferred EPs, + # EPs inferred from inputs or graph, and the fallback default EP)/ + # + # TODO(wschin): enable external allocators. + # See https://github.com/pytorch/pytorch/issues/106867 + onnx_session = onnxruntime.InferenceSession( + path_or_bytes=onnx_model_bytes, + sess_options=self._options.ort_session_options, + providers=self._select_eps(graph_module, *args), + ) + + # Cache ORT session. It's reused for the same "graph_module". + # Generate ONNX model and extract its input and output names. 
+            input_names = tuple(input.name for input in onnx_model.graph.input)
+            output_names = tuple(output.name for output in onnx_model.graph.output)
+            input_devices = _get_onnx_devices(args)
+            # Cache devices for inputs and outputs. They are used to invoke the
+            # ORT session. Output devices indicate where (e.g., GPU or CPU)
+            # to store the outputs.
+            if isinstance(prim_outputs, tuple):
+                output_devices = _get_onnx_devices(prim_outputs)
+            else:
+                output_devices = _get_onnx_devices((prim_outputs,))
+
+            input_value_infos = tuple(input for input in onnx_model.graph.input)
+            output_value_infos = tuple(output for output in onnx_model.graph.output)
+
+            execution_info_per_session = OrtExecutionInfoPerSession(
+                session=onnx_session,
+                input_names=input_names,
+                input_value_infos=input_value_infos,
+                output_names=output_names,
+                output_value_infos=output_value_infos,
+                input_devices=input_devices,
+                output_devices=output_devices,
+                example_outputs=prim_outputs,
+            )
+
+            self._all_ort_execution_info.cache_session_execution_info(
+                graph_module, execution_info_per_session
+            )
+
+        self.execution_count += 1
+
+        # ORT always returns a tuple of outputs. If the original output is a tensor,
+        # the first element of ORT's output must be extracted and returned. Otherwise, a type
+        # mismatch may happen in downstream computation.
+        is_single_tensor_output = isinstance(prim_outputs, torch.Tensor)
+        normalized_prim_outputs = (
+            (prim_outputs,) if is_single_tensor_output else prim_outputs
+        )
+        assert isinstance(normalized_prim_outputs, tuple)
+        assert all(
+            isinstance(elem, (torch.Tensor, torch.SymInt, int))
+            for elem in normalized_prim_outputs
+        )
+
+        _nvtx_range_push("run_onnx_session_with_ortvaluevector")
+        onnx_outputs = self.run(
+            onnx_session,
+            input_names,
+            args,
+            input_devices,
+            output_names,
+            normalized_prim_outputs,
+            output_devices,
+            self._options.preallocate_output,
+            input_value_infos,
+            normalized_prim_outputs,
+        )
+        _nvtx_range_pop()
+
+        if self._assert_allclose_to_baseline:
+            # Compute the baseline.
+            baseline_outputs = torch._prims.executor.execute(
+                graph_module, *args, executor="aten"
+            )
+            normalized_baseline_outputs = (
+                (baseline_outputs,) if is_single_tensor_output else baseline_outputs
+            )
+            # Ensure every output tensor is close to the corresponding baseline.
+            for onnx_output, baseline_output in zip(
+                onnx_outputs, normalized_baseline_outputs
+            ):
+                torch.testing.assert_close(onnx_output, baseline_output)
+        return onnx_outputs[0] if is_single_tensor_output else onnx_outputs
+
+    def compile(self, graph_module: torch.fx.GraphModule, args) -> torch.fx.GraphModule:
+        # Deferred import since CapabilityBasedPartitioner is not decorated with
+        # @compatibility; importing it at the module level will result in the test
+        # failing: pytest test/test_fx.py -k test_public_api_surface
+        # because this module is imported into torch.onnx.
+        from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
+
+        # FX-graph-based partitioning based on ONNX-supported ops.
+        # Given a graph module
+        #  GraphModule0
+        #   node_0
+        #   node_1
+        #   node_2
+        #   node_3
+        #   node_4
+        # If only node_2 is not supported by ONNX, this graph module will be partitioned into
+        #  GraphModule0
+        #   GraphModule1
+        #    node_0
+        #    node_1
+        #   node_2
+        #   GraphModule2
+        #    node_3
+        #    node_4
+        # by calling CapabilityBasedPartitioner.partition_and_fuse.
+        # Then, GraphModule1's and GraphModule2's forward methods (GraphModule._wrapped_call)
+        # will be replaced by OrtBackend._ort_accelerated_call to delegate computation to ORT.
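+        # A sketch of the resulting call flow for the example above (names are
+        # illustrative):
+        #
+        #     partitioned(x)      # GraphModule0.forward
+        #       fused_0(x)        # GraphModule1 -> _ort_acclerated_call -> ORT
+        #       node_2(...)       # unsupported op, runs in eager PyTorch
+        #       fused_1(...)      # GraphModule2 -> _ort_acclerated_call -> ORT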
+        if graph_module in self._partitioner_cache:
+            partitioned_prim_graph_module = self._partitioner_cache[graph_module]
+        else:
+            prim_graph_module = graph_module
+            partitioner = CapabilityBasedPartitioner(
+                prim_graph_module,
+                self._supported_ops,
+                allows_single_node_partition=True,
+            )
+            partitioned_prim_graph_module = partitioner.partition_and_fuse()
+            self._partitioner_cache[graph_module] = partitioned_prim_graph_module
+
+            # Override fused_module's __call__() function with _ort_acclerated_call().
+            # This loop goes through all graph partitions (each of them is an ONNX-representable graph)
+            # and overrides their _wrapped_call function with _ort_acclerated_call.
+            # Inside _ort_acclerated_call, the partition's graph is exported into ONNX and executed by ORT.
+            for node in partitioned_prim_graph_module.graph.nodes:
+                # TODO(wschin): use a better way to identify fused submodules.
+                # See https://github.com/pytorch/pytorch/issues/106872.
+                if node.op == "call_module" and "fused_" in node.name:
+                    fused_module = getattr(partitioned_prim_graph_module, node.name)
+                    # self._ort_acclerated_call is responsible for exporting the graph to ONNX,
+                    # creating the ORT session, and running the ORT session.
+                    fused_module._wrapped_call = self._ort_acclerated_call
+
+        return partitioned_prim_graph_module
+
+    def __call__(
+        self, graph_module: torch.fx.GraphModule, args
+    ) -> torch.fx.GraphModule:
+        """If ``OrtBackendOptions.use_aot_autograd`` is ``True``, the ``aot_autograd`` compiler
+        will be invoked, wrapping this ``OrtBackend`` instance's ``compile`` method. Otherwise,
+        the ``compile`` method is invoked directly."""
+        if self._options.use_aot_autograd:
+            from functorch.compile import min_cut_rematerialization_partition
+
+            from torch._dynamo.backends.common import aot_autograd
+
+            return aot_autograd(
+                fw_compiler=self.compile,
+                partition_fn=min_cut_rematerialization_partition,
+                decompositions=self._resolved_onnx_exporter_options.decomposition_table,
+            )(graph_module, args)
+
+        return self.compile(graph_module, args)
+
+    __instance_cache_max_count: Final = 8
+    __instance_cache: Final[List["OrtBackend"]] = []
+
+    @staticmethod
+    def get_cached_instance_for_options(
+        options: Optional[Union[OrtBackendOptions, Mapping[str, Any]]] = None,
+    ) -> "OrtBackend":
+        """Returns a possibly cached instance of an ``OrtBackend``. If an existing
+        backend was created previously through this function with the same options,
+        it will be returned. Otherwise a new backend will be created, cached, and
+        returned.
+
+        Note: if ``options`` sets ``ort_session_options``, a new ``OrtBackend``
+        will always be returned, since ``onnxruntime.SessionOptions`` cannot
+        participate in caching."""
+
+        def reusable(a: OrtBackendOptions, b: OrtBackendOptions):
+            if (
+                a.preferred_execution_providers != b.preferred_execution_providers
+                or a.infer_execution_providers != b.infer_execution_providers
+                or a.default_execution_providers != b.default_execution_providers
+                or a.preallocate_output != b.preallocate_output
+                or a.use_aot_autograd != b.use_aot_autograd
+                or a.pre_ort_model_transforms != b.pre_ort_model_transforms
+            ):
+                return False
+
+            # onnxruntime.SessionOptions is a pybind11 object, cannot be pickled,
+            # and holds too much potential state to reasonably check manually;
+            # if ort_session_options is provided at all, the backend does not participate
+            # in caching.
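+            # E.g., two option sets built with
+            # OrtBackendOptions(ort_session_options=onnxruntime.SessionOptions())
+            # are never treated as reusable, so each one gets a fresh backend
+            # (an illustration of the rule implemented just below):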
+ if a.ort_session_options is not None or b.ort_session_options is not None: + return False + + if a.export_options is b.export_options: + return True + + # Similarly, some objects in ExportOptions are too stateful to use for + # caching. We should revisit this. + if a.export_options is not None and b.export_options is not None: + return ( + a.export_options.dynamic_shapes == b.export_options.dynamic_shapes + and a.export_options.op_level_debug + == b.export_options.op_level_debug + and a.export_options.diagnostic_options + == b.export_options.diagnostic_options + and a.export_options.onnx_registry is b.export_options.onnx_registry + and a.export_options.fake_context is b.export_options.fake_context + ) + + # We can't account for how the two option sets may differ, so it's not safe to reuse. + return False + + if not isinstance(options, OrtBackendOptions): + options = OrtBackendOptions(**(options or {})) + + backend = next( + (b for b in OrtBackend.__instance_cache if reusable(b._options, options)), + None, + ) + + if backend is None: + assert ( + len(OrtBackend.__instance_cache) < OrtBackend.__instance_cache_max_count + ), ( + f"No more than {OrtBackend.__instance_cache_max_count} instances of " + f"{OrtBackend} allowed. Please instantiate `{OrtBackend}` explicitly " + "to pass to `torch.compile`. " + "See https://github.com/pytorch/pytorch/pull/107973#discussion_r1306144795 " + "for discussion." + ) + OrtBackend.__instance_cache.append(backend := OrtBackend(options)) + + return backend + + @staticmethod + def clear_cached_instances(): + OrtBackend.__instance_cache.clear() + + @staticmethod + def get_cached_instances(): + return tuple(OrtBackend.__instance_cache) + + +@compatibility(is_backward_compatible=False) +def torch_compile_backend( + graph_module: torch.fx.GraphModule, + args, + *, + options: Optional[Union[OrtBackendOptions, Mapping[str, Any]]] = None, +): + return OrtBackend.get_cached_instance_for_options(options)(graph_module, args) diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_internal/registration.py b/venv/lib/python3.10/site-packages/torch/onnx/_internal/registration.py new file mode 100644 index 0000000000000000000000000000000000000000..017a2fb7dadfb2146ca629373b9a9d3506ad3b71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_internal/registration.py @@ -0,0 +1,339 @@ +"""Module for handling symbolic function registration.""" + +import warnings +from typing import ( + Callable, + Collection, + Dict, + Generic, + Optional, + Sequence, + Set, + TypeVar, + Union, +) + +from torch.onnx import _constants, errors +from torch.onnx._internal import _beartype + +OpsetVersion = int + + +def _dispatch_opset_version( + target: OpsetVersion, registered_opsets: Collection[OpsetVersion] +) -> Optional[OpsetVersion]: + """Finds the registered opset given a target opset version and the available opsets. + + Args: + target: The target opset version. + registered_opsets: The available opsets. + + Returns: + The registered opset version. + """ + if not registered_opsets: + return None + + descending_registered_versions = sorted(registered_opsets, reverse=True) + # Linear search for the opset version, which is fine since the number of opset + # versions is small. + + if target >= _constants.ONNX_BASE_OPSET: + # Always look down toward opset 1 when the target is >= ONNX_BASE_OPSET (opset 9). + # When a custom op is register at opset 1, we want to be able to discover it as a + # fallback for all opsets >= ONNX_BASE_OPSET. 
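+        # E.g., with registered_opsets == {9, 11, 13}: a target of 12
+        # dispatches to 11, a target of 14 dispatches to 13, and a target of
+        # 10 dispatches to 9 (an illustration of the search below).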
+ for version in descending_registered_versions: + if version <= target: + return version + return None + + # target < opset 9. This is the legacy behavior to support opset 7 and opset 8. + # for caffe2 support. We search up toward opset 9. + for version in reversed(descending_registered_versions): + # Count back up until _constants.ONNX_BASE_OPSET + if target <= version <= _constants.ONNX_BASE_OPSET: + return version + + return None + + +_K = TypeVar("_K") +_V = TypeVar("_V") + + +class OverrideDict(Generic[_K, _V], Collection[_K]): + """A dictionary that merges built-in and custom symbolic functions. + + It supports overriding and un-overriding built-in symbolic functions with custom + ones. + """ + + def __init__(self): + self._base: Dict[_K, _V] = {} + self._overrides: Dict[_K, _V] = {} + self._merged: Dict[_K, _V] = {} + + def set_base(self, key: _K, value: _V) -> None: + self._base[key] = value + if key not in self._overrides: + self._merged[key] = value + + def in_base(self, key: _K) -> bool: + """Checks if a key is in the base dictionary.""" + return key in self._base + + def override(self, key: _K, value: _V) -> None: + """Overrides a base key-value with a new pair.""" + self._overrides[key] = value + self._merged[key] = value + + def remove_override(self, key: _K) -> None: + """Un-overrides a key-value pair.""" + self._overrides.pop(key, None) # type: ignore[arg-type] + self._merged.pop(key, None) # type: ignore[arg-type] + if key in self._base: + self._merged[key] = self._base[key] + + def overridden(self, key: _K) -> bool: + """Checks if a key-value pair is overridden.""" + return key in self._overrides + + def __getitem__(self, key: _K) -> _V: + return self._merged[key] + + def get(self, key: _K, default: Optional[_V] = None): + return self._merged.get(key, default) + + def __contains__(self, key: object) -> bool: + return key in self._merged + + def __iter__(self): + return iter(self._merged) + + def __len__(self) -> int: + return len(self._merged) + + def __repr__(self) -> str: + return f"OverrideDict(base={self._base}, overrides={self._overrides})" + + def __bool__(self) -> bool: + return bool(self._merged) + + +class _SymbolicFunctionGroup: + """Different versions of symbolic functions registered to the same name. + + O(number of registered versions of an op) search is performed to find the most + recent version of the op. + + The registration is delayed until op is used to improve startup time. + + Function overloads with different arguments are not allowed. + Custom op overrides are supported. + """ + + def __init__(self, name: str) -> None: + self._name = name + # A dictionary of functions, keyed by the opset version. + self._functions: OverrideDict[OpsetVersion, Callable] = OverrideDict() + + def __repr__(self) -> str: + return f"_SymbolicFunctionGroup({self._name}, registered={self._functions})" + + def __getitem__(self, key: OpsetVersion) -> Callable: + result = self.get(key) + if result is None: + raise KeyError(key) + return result + + # TODO(justinchuby): Add @functools.lru_cache(maxsize=None) if lookup time becomes + # a problem. + def get(self, opset: OpsetVersion) -> Optional[Callable]: + """Find the most recent version of the function.""" + version = _dispatch_opset_version(opset, self._functions) + if version is None: + return None + + return self._functions[version] + + def add(self, func: Callable, opset: OpsetVersion) -> None: + """Adds a symbolic function. + + Args: + func: The function to add. + opset: The opset version of the function to add. 
+ """ + if self._functions.in_base(opset): + warnings.warn( + f"Symbolic function '{self._name}' already registered for opset {opset}. " + f"Replacing the existing function with new function. This is unexpected. " + f"Please report it on {_constants.PYTORCH_GITHUB_ISSUES_URL}.", + errors.OnnxExporterWarning, + ) + self._functions.set_base(opset, func) + + def add_custom(self, func: Callable, opset: OpsetVersion) -> None: + """Adds a custom symbolic function. + + Args: + func: The symbolic function to register. + opset: The corresponding opset version. + """ + self._functions.override(opset, func) + + def remove_custom(self, opset: OpsetVersion) -> None: + """Removes a custom symbolic function. + + Args: + opset: The opset version of the custom function to remove. + """ + if not self._functions.overridden(opset): + warnings.warn( + f"No custom function registered for '{self._name}' opset {opset}" + ) + return + self._functions.remove_override(opset) + + def get_min_supported(self) -> OpsetVersion: + """Returns the lowest built-in opset version supported by the function.""" + return min(self._functions) + + +class SymbolicRegistry: + """Registry for symbolic functions. + + The registry maintains a mapping from qualified names to symbolic functions. + It is used to register new symbolic functions and to dispatch calls to + the appropriate function. + """ + + def __init__(self) -> None: + self._registry: Dict[str, _SymbolicFunctionGroup] = {} + + def register( + self, name: str, opset: OpsetVersion, func: Callable, custom: bool = False + ) -> None: + """Registers a symbolic function. + + Args: + name: The qualified name of the function to register. In the form of 'domain::op'. + E.g. 'aten::add'. + opset: The opset version of the function to register. + func: The symbolic function to register. + custom: Whether the function is a custom function that overrides existing ones. + + Raises: + ValueError: If the separator '::' is not in the name. + """ + if "::" not in name: + raise ValueError( + f"The name must be in the form of 'domain::op', not '{name}'" + ) + symbolic_functions = self._registry.setdefault( + name, _SymbolicFunctionGroup(name) + ) + if custom: + symbolic_functions.add_custom(func, opset) + else: + symbolic_functions.add(func, opset) + + def unregister(self, name: str, opset: OpsetVersion) -> None: + """Unregisters a symbolic function. + + Args: + name: The qualified name of the function to unregister. + opset: The opset version of the function to unregister. + """ + if name not in self._registry: + return + self._registry[name].remove_custom(opset) + + def get_function_group(self, name: str) -> Optional[_SymbolicFunctionGroup]: + """Returns the function group for the given name.""" + return self._registry.get(name) + + def is_registered_op(self, name: str, version: int) -> bool: + """Returns whether the given op is registered for the given opset version.""" + functions = self.get_function_group(name) + if functions is None: + return False + return functions.get(version) is not None + + def all_functions(self) -> Set[str]: + """Returns the set of all registered function names.""" + return set(self._registry) + + +@_beartype.beartype +def onnx_symbolic( + name: str, + opset: Union[OpsetVersion, Sequence[OpsetVersion]], + decorate: Optional[Sequence[Callable]] = None, + custom: bool = False, +) -> Callable: + """Registers a symbolic function. 
+ + Usage:: + + ``` + @onnx_symbolic("aten::symbolic_b", opset=10, decorate=[quantized_aten_handler(scale=1/128, zero_point=0)]) + @symbolic_helper.parse_args("v", "v", "b") + def symbolic_b(g: _C.Graph, x: _C.Value, y: _C.Value, arg1: bool) -> _C.Value: + ... + ``` + + Args: + name: The qualified name of the function in the form of 'domain::op'. + E.g. 'aten::add'. + opset: The opset versions of the function to register at. + decorate: A sequence of decorators to apply to the function. + custom: Whether the function is a custom symbolic function. + + Raises: + ValueError: If the separator '::' is not in the name. + """ + + def wrapper(func: Callable) -> Callable: + decorated = func + if decorate is not None: + for decorate_func in decorate: + decorated = decorate_func(decorated) + + global registry + nonlocal opset + if isinstance(opset, OpsetVersion): + opset = (opset,) + for opset_version in opset: + registry.register(name, opset_version, decorated, custom=custom) + + # Return the original function because the decorators in "decorate" are only + # specific to the instance being registered. + return func + + return wrapper + + +@_beartype.beartype +def custom_onnx_symbolic( + name: str, + opset: Union[OpsetVersion, Sequence[OpsetVersion]], + decorate: Optional[Sequence[Callable]] = None, +) -> Callable: + """Registers a custom symbolic function. + + Args: + name: the qualified name of the function. + opset: the opset version of the function. + decorate: a sequence of decorators to apply to the function. + + Returns: + The decorator. + + Raises: + ValueError: If the separator '::' is not in the name. + """ + return onnx_symbolic(name, opset, decorate, custom=True) + + +# The registry for all symbolic functions. +registry = SymbolicRegistry()
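+
+# A usage sketch (the domain, op name, and symbolic body are hypothetical):
+#
+#     @custom_onnx_symbolic("mylib::relu6", opset=9)
+#     def relu6(g, x):
+#         return g.op("Clip", x, min_f=0.0, max_f=6.0)
+#
+#     registry.is_registered_op("mylib::relu6", 9)  # -> True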