===========================================================================================================================================
SOURCE CODE FILE: decorator.py
LINES: 3
SIZE: 5.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\decorator.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations

import functools
import logging
import traceback
from typing import Any, Callable

from torch.onnx._internal.diagnostics import infra
from torch.onnx._internal.diagnostics.infra import formatter, utils


MessageFormatterType = Callable[..., str]


def format_message_in_text(fn: Callable, *args: Any, **kwargs: Any) -> str:
    return f"{formatter.display_name(fn)}. "


def format_exception_in_markdown(exception: Exception) -> str:
    msg_list = ["### Exception log", "```"]
    msg_list.extend(
        traceback.format_exception(type(exception), exception, exception.__traceback__)
    )
    msg_list.append("```")
    return "\n".join(msg_list)


def format_function_signature_in_markdown(
    fn: Callable,
    args: tuple[Any, ...],
    kwargs: dict[str, Any],
    format_argument: Callable[[Any], str] = formatter.format_argument,
) -> str:
    msg_list = [f"### Function Signature {formatter.display_name(fn)}"]
    state = utils.function_state(fn, args, kwargs)
    for k, v in state.items():
        msg_list.append(f"- {k}: {format_argument(v)}")
    return "\n".join(msg_list)


def format_return_values_in_markdown(
    return_values: Any,
    format_argument: Callable[[Any], str] = formatter.format_argument,
) -> str:
    return f"{format_argument(return_values)}"


ModifierCallableType = Callable[
    [infra.Diagnostic, Callable, tuple[Any, ...], dict[str, Any], Any], None
]


def diagnose_call(
    rule: infra.Rule,
    *,
    level: infra.Level = infra.Level.NONE,
    diagnostic_type: type[infra.Diagnostic] = infra.Diagnostic,
    format_argument: Callable[[Any], str] = formatter.format_argument,
    diagnostic_message_formatter: MessageFormatterType = format_message_in_text,
) -> Callable:
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            common_error_message = "diagnose_call can only be applied to callables"
            if not callable(fn):
                raise AssertionError(
                    f"{common_error_message}. Got {type(fn)} instead of callable."
                )
            arg0 = args[0] if len(args) > 0 else None
            if isinstance(ctx := arg0, infra.DiagnosticContext):
                pass
            elif isinstance(
                ctx := getattr(arg0, "diagnostic_context", None),
                infra.DiagnosticContext,
            ):
                pass
            else:
                # NOTE: At decoration time we cannot tell whether the callable is a
                # function or a method; technically both are regarded as functions
                # at that point.
                raise AssertionError(
                    f"{common_error_message}. For {fn}, "
                    f"if it is a function, a DiagnosticContext instance must be present as "
                    f"the first argument. "
                    f"If it is a method, a DiagnosticContext instance must be present as "
                    f"the attribute 'diagnostic_context' of the 'self' argument."
                )

            diag = diagnostic_type(
                rule,
                level,
                diagnostic_message_formatter(fn, *args, **kwargs),
            )

            # Pop the decorator frame off the captured stack.
            # TODO(bowbao): by default a diagnostic doesn't have a stack, so this
            # needs to be checked first. Make the code cleaner.
            # Option: do not capture the stack by default in diagnostic initialization.
            stack: infra.Stack | None = None
            if len(diag.stacks) > 0:
                stack = diag.stacks[0]
                stack.frames.pop(0)

            # Set the function location.
            fn_location = utils.function_location(fn)
            diag.locations.insert(0, fn_location)
            # Add the function location to the top of the stack.
            if stack is not None:
                stack.frames.insert(0, infra.StackFrame(location=fn_location))

            with diag.log_section(logging.INFO, "Function Signature"):
                diag.log(
                    logging.INFO,
                    "%s",
                    formatter.LazyString(
                        format_function_signature_in_markdown,
                        fn,
                        args,
                        kwargs,
                        format_argument,
                    ),
                )

            return_values: Any = None
            with ctx.add_inflight_diagnostic(diag) as diag:
                try:
                    return_values = fn(*args, **kwargs)
                    with diag.log_section(logging.INFO, "Return values"):
                        diag.log(
                            logging.INFO,
                            "%s",
                            formatter.LazyString(
                                format_return_values_in_markdown,
                                return_values,
                                format_argument,
                            ),
                        )
                    return return_values
                except Exception as e:
                    diag.log_source_exception(logging.ERROR, e)
                    diag.level = infra.Level.ERROR
                finally:
                    ctx.log_and_raise_if_error(diag)

        return wrapper

    return decorator


# TODO(bowbao): decorator to report only when failed.
```
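A minimal usage sketch of `diagnose_call`. The rule, level, and context values here are hypothetical; it assumes `infra.Rule`, `infra.Level`, and `infra.DiagnosticContext` (defined elsewhere in the `infra` package) accept the constructor arguments shown.
```py
# Hedged sketch: applying diagnose_call to a free function whose first
# argument is a DiagnosticContext, as the wrapper requires.
from torch.onnx._internal.diagnostics import infra
from torch.onnx._internal.diagnostics.infra.decorator import diagnose_call

# Assumed Rule constructor fields (id, name, message template).
_DEMO_RULE = infra.Rule(
    id="DEMO0001",
    name="demo-rule",
    message_default_template="Demo rule",
)


@diagnose_call(_DEMO_RULE, level=infra.Level.NOTE)
def add(ctx: infra.DiagnosticContext, x: int, y: int) -> int:
    return x + y


# Assumed DiagnosticContext constructor: (name, version).
context = infra.DiagnosticContext("demo-run", "1.0")
print(add(context, 1, 2))  # 3; the signature and return value are logged
                           # into a new diagnostic attached to `context`.
```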
===========================================================================================================================================
SOURCE CODE FILE: formatter.py
LINES: 2
SIZE: 2.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\formatter.py
ENCODING: utf-8
```py
from __future__ import annotations

import dataclasses
import json
import re
import traceback
from typing import Any, Callable, Union

from torch._logging import LazyString
from torch.onnx._internal.diagnostics.infra import sarif


# A list of types in the SARIF module to support pretty printing.
# This is solely for type annotation for the functions below.
_SarifClass = Union[
    sarif.SarifLog,
    sarif.Run,
    sarif.ReportingDescriptor,
    sarif.Result,
]


def lazy_format_exception(exception: Exception) -> LazyString:
    return LazyString(
        lambda: "\n".join(
            (
                "```",
                *traceback.format_exception(
                    type(exception), exception, exception.__traceback__
                ),
                "```",
            )
        ),
    )


def snake_case_to_camel_case(s: str) -> str:
    splits = s.split("_")
    if len(splits) <= 1:
        return s
    return "".join([splits[0], *map(str.capitalize, splits[1:])])


def camel_case_to_snake_case(s: str) -> str:
    return re.sub(r"([A-Z])", r"_\1", s).lower()


def kebab_case_to_snake_case(s: str) -> str:
    return s.replace("-", "_")


def _convert_key(
    object: dict[str, Any] | Any, convert: Callable[[str], str]
) -> dict[str, Any] | Any:
    """Convert and update keys in a dictionary with "convert".

    Any value that is a dictionary will be recursively updated.
    Any value that is a list will be recursively searched.

    Args:
        object: The object to update.
        convert: The function to convert the keys, e.g. `kebab_case_to_snake_case`.

    Returns:
        The updated object.
    """
    if not isinstance(object, dict):
        return object
    new_dict = {}
    for k, v in object.items():
        new_k = convert(k)
        if isinstance(v, dict):
            new_v = _convert_key(v, convert)
        elif isinstance(v, list):
            new_v = [_convert_key(elem, convert) for elem in v]
        else:
            new_v = v
        if new_v is None:
            # Skip None values; otherwise the sarif log is unnecessarily
            # bloated with "null"s.
            continue
        if new_v == -1:
            # WAR: -1 is used as the default value and shouldn't be logged
            # into the sarif file.
            continue
        new_dict[new_k] = new_v
    return new_dict


def sarif_to_json(attr_cls_obj: _SarifClass, indent: str | None = " ") -> str:
    dict = dataclasses.asdict(attr_cls_obj)
    dict = _convert_key(dict, snake_case_to_camel_case)
    return json.dumps(dict, indent=indent, separators=(",", ":"))


def format_argument(obj: Any) -> str:
    return f"{type(obj)}"


def display_name(fn: Callable) -> str:
    if hasattr(fn, "__qualname__"):
        return fn.__qualname__
    elif hasattr(fn, "__name__"):
        return fn.__name__
    else:
        return str(fn)
```
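A short demo of the helpers above, using only functions from this file plus the generated `sarif.Message` dataclass (shown further below):
```py
# Exercising the case-conversion helpers and the JSON serializer above.
from torch.onnx._internal.diagnostics.infra import formatter, sarif

assert formatter.snake_case_to_camel_case("start_time_utc") == "startTimeUtc"
assert formatter.camel_case_to_snake_case("startTimeUtc") == "start_time_utc"
assert formatter.kebab_case_to_snake_case("start-time-utc") == "start_time_utc"

# sarif_to_json is annotated for the top-level SARIF classes, but
# dataclasses.asdict accepts any of the generated dataclasses.
msg = sarif.Message(text="hello")
print(formatter.sarif_to_json(msg))
# -> {"text":"hello"} (pretty-printed): keys come out camelCase and
#    None-valued fields are dropped by _convert_key.
```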
================================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 4.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\__init__.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from torch.onnx._internal.diagnostics.infra.sarif._address import Address
from torch.onnx._internal.diagnostics.infra.sarif._artifact import Artifact
from torch.onnx._internal.diagnostics.infra.sarif._artifact_change import ArtifactChange
from torch.onnx._internal.diagnostics.infra.sarif._artifact_content import (
ArtifactContent,
)
from torch.onnx._internal.diagnostics.infra.sarif._artifact_location import (
ArtifactLocation,
)
from torch.onnx._internal.diagnostics.infra.sarif._attachment import Attachment
from torch.onnx._internal.diagnostics.infra.sarif._code_flow import CodeFlow
from torch.onnx._internal.diagnostics.infra.sarif._configuration_override import (
ConfigurationOverride,
)
from torch.onnx._internal.diagnostics.infra.sarif._conversion import Conversion
from torch.onnx._internal.diagnostics.infra.sarif._edge import Edge
from torch.onnx._internal.diagnostics.infra.sarif._edge_traversal import EdgeTraversal
from torch.onnx._internal.diagnostics.infra.sarif._exception import Exception
from torch.onnx._internal.diagnostics.infra.sarif._external_properties import (
ExternalProperties,
)
from torch.onnx._internal.diagnostics.infra.sarif._external_property_file_reference import (
ExternalPropertyFileReference,
)
from torch.onnx._internal.diagnostics.infra.sarif._external_property_file_references import (
ExternalPropertyFileReferences,
)
from torch.onnx._internal.diagnostics.infra.sarif._fix import Fix
from torch.onnx._internal.diagnostics.infra.sarif._graph import Graph
from torch.onnx._internal.diagnostics.infra.sarif._graph_traversal import GraphTraversal
from torch.onnx._internal.diagnostics.infra.sarif._invocation import Invocation
from torch.onnx._internal.diagnostics.infra.sarif._location import Location
from torch.onnx._internal.diagnostics.infra.sarif._location_relationship import (
LocationRelationship,
)
from torch.onnx._internal.diagnostics.infra.sarif._logical_location import (
LogicalLocation,
)
from torch.onnx._internal.diagnostics.infra.sarif._message import Message
from torch.onnx._internal.diagnostics.infra.sarif._multiformat_message_string import (
MultiformatMessageString,
)
from torch.onnx._internal.diagnostics.infra.sarif._node import Node
from torch.onnx._internal.diagnostics.infra.sarif._notification import Notification
from torch.onnx._internal.diagnostics.infra.sarif._physical_location import (
PhysicalLocation,
)
from torch.onnx._internal.diagnostics.infra.sarif._property_bag import PropertyBag
from torch.onnx._internal.diagnostics.infra.sarif._rectangle import Rectangle
from torch.onnx._internal.diagnostics.infra.sarif._region import Region
from torch.onnx._internal.diagnostics.infra.sarif._replacement import Replacement
from torch.onnx._internal.diagnostics.infra.sarif._reporting_configuration import (
ReportingConfiguration,
)
from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor import (
ReportingDescriptor,
)
from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor_reference import (
ReportingDescriptorReference,
)
from torch.onnx._internal.diagnostics.infra.sarif._reporting_descriptor_relationship import (
ReportingDescriptorRelationship,
)
from torch.onnx._internal.diagnostics.infra.sarif._result import Result
from torch.onnx._internal.diagnostics.infra.sarif._result_provenance import (
ResultProvenance,
)
from torch.onnx._internal.diagnostics.infra.sarif._run import Run
from torch.onnx._internal.diagnostics.infra.sarif._run_automation_details import (
RunAutomationDetails,
)
from torch.onnx._internal.diagnostics.infra.sarif._sarif_log import SarifLog
from torch.onnx._internal.diagnostics.infra.sarif._special_locations import (
SpecialLocations,
)
from torch.onnx._internal.diagnostics.infra.sarif._stack import Stack
from torch.onnx._internal.diagnostics.infra.sarif._stack_frame import StackFrame
from torch.onnx._internal.diagnostics.infra.sarif._suppression import Suppression
from torch.onnx._internal.diagnostics.infra.sarif._thread_flow import ThreadFlow
from torch.onnx._internal.diagnostics.infra.sarif._thread_flow_location import (
ThreadFlowLocation,
)
from torch.onnx._internal.diagnostics.infra.sarif._tool import Tool
from torch.onnx._internal.diagnostics.infra.sarif._tool_component import ToolComponent
from torch.onnx._internal.diagnostics.infra.sarif._tool_component_reference import (
ToolComponentReference,
)
from torch.onnx._internal.diagnostics.infra.sarif._translation_metadata import (
TranslationMetadata,
)
from torch.onnx._internal.diagnostics.infra.sarif._version_control_details import (
VersionControlDetails,
)
from torch.onnx._internal.diagnostics.infra.sarif._web_request import WebRequest
from torch.onnx._internal.diagnostics.infra.sarif._web_response import WebResponse
# flake8: noqa
```
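As a sketch of how these generated classes compose, the following assembles a minimal log and serializes it with `formatter.sarif_to_json`. The required constructor fields (`name`, `driver`, `tool`, `runs`, `version`) are assumed to mirror the SARIF 2.1.0 schema; the class definitions live in the sibling modules imported above.
```py
# Hedged sketch: composing a minimal SARIF 2.1.0 log from the classes
# re-exported by this package.
from torch.onnx._internal.diagnostics.infra import formatter, sarif

log = sarif.SarifLog(
    version="2.1.0",
    runs=[
        sarif.Run(
            tool=sarif.Tool(driver=sarif.ToolComponent(name="torch.onnx")),
            results=[],
        )
    ],
)
print(formatter.sarif_to_json(log))  # camelCase keys, defaults omitted
```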
================================================================================================================================================
SOURCE CODE FILE: _address.py
LINES: 1
SIZE: 1.75 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_address.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
@dataclasses.dataclass
class Address(object):
"""A physical or virtual address, or a range of addresses, in an 'addressable region' (memory or a binary file)."""
absolute_address: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "absoluteAddress"}
)
fully_qualified_name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "fullyQualifiedName"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
kind: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "kind"}
)
length: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "length"}
)
name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "name"}
)
offset_from_parent: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "offsetFromParent"}
)
parent_index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "parentIndex"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
relative_address: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "relativeAddress"}
)
# flake8: noqa
```
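Note the `-1` defaults on `index` and `parent_index`: together with `formatter._convert_key`, they act as sentinels that are dropped during serialization. A small sketch:
```py
# Sentinel defaults (-1 / None) are stripped by formatter._convert_key,
# so only explicitly populated fields reach the JSON output.
from torch.onnx._internal.diagnostics.infra import formatter, sarif

addr = sarif.Address(absolute_address=4096, kind="page")
print(formatter.sarif_to_json(addr))
# -> {"absoluteAddress":4096,"kind":"page"} (pretty-printed)
```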
=================================================================================================================================================
SOURCE CODE FILE: _artifact.py
LINES: 1
SIZE: 3.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_artifact.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_content,
_artifact_location,
_message,
_property_bag,
)
@dataclasses.dataclass
class Artifact(object):
"""A single artifact. In some cases, this artifact might be nested within another artifact."""
contents: Optional[_artifact_content.ArtifactContent] = dataclasses.field(
default=None, metadata={"schema_property_name": "contents"}
)
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
encoding: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "encoding"}
)
hashes: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "hashes"}
)
last_modified_time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "lastModifiedTimeUtc"}
)
length: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "length"}
)
location: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "location"}
)
mime_type: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "mimeType"}
)
offset: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "offset"}
)
parent_index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "parentIndex"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
roles: Optional[
List[
Literal[
"analysisTarget",
"attachment",
"responseFile",
"resultFile",
"standardStream",
"tracedFile",
"unmodified",
"modified",
"added",
"deleted",
"renamed",
"uncontrolled",
"driver",
"extension",
"translation",
"taxonomy",
"policy",
"referencedOnCommandLine",
"memoryContents",
"directory",
"userSpecifiedConfiguration",
"toolSpecifiedConfiguration",
"debugOutputFile",
]
]
] = dataclasses.field(default=None, metadata={"schema_property_name": "roles"})
source_language: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "sourceLanguage"}
)
# flake8: noqa
```
========================================================================================================================================================
SOURCE CODE FILE: _artifact_change.py
LINES: 1
SIZE: 0.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_artifact_change.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_property_bag,
_replacement,
)
@dataclasses.dataclass
class ArtifactChange(object):
"""A change to a single artifact."""
artifact_location: _artifact_location.ArtifactLocation = dataclasses.field(
metadata={"schema_property_name": "artifactLocation"}
)
replacements: List[_replacement.Replacement] = dataclasses.field(
metadata={"schema_property_name": "replacements"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
=========================================================================================================================================================
SOURCE CODE FILE: _artifact_content.py
LINES: 1
SIZE: 1.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_artifact_content.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_multiformat_message_string,
_property_bag,
)
@dataclasses.dataclass
class ArtifactContent(object):
"""Represents the contents of an artifact."""
binary: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "binary"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
rendered: Optional[_multiformat_message_string.MultiformatMessageString] = (
dataclasses.field(default=None, metadata={"schema_property_name": "rendered"})
)
text: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "text"}
)
# flake8: noqa
```
==========================================================================================================================================================
SOURCE CODE FILE: _artifact_location.py
LINES: 1
SIZE: 1.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_artifact_location.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
@dataclasses.dataclass
class ArtifactLocation(object):
"""Specifies the location of an artifact."""
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "uri"}
)
uri_base_id: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "uriBaseId"}
)
# flake8: noqa
```
===================================================================================================================================================
SOURCE CODE FILE: _attachment.py
LINES: 1
SIZE: 1.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_attachment.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_message,
_property_bag,
_rectangle,
_region,
)
@dataclasses.dataclass
class Attachment(object):
"""An artifact relevant to a result."""
artifact_location: _artifact_location.ArtifactLocation = dataclasses.field(
metadata={"schema_property_name": "artifactLocation"}
)
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
rectangles: Optional[List[_rectangle.Rectangle]] = dataclasses.field(
default=None, metadata={"schema_property_name": "rectangles"}
)
regions: Optional[List[_region.Region]] = dataclasses.field(
default=None, metadata={"schema_property_name": "regions"}
)
# flake8: noqa
```
==================================================================================================================================================
SOURCE CODE FILE: _code_flow.py
LINES: 1
SIZE: 0.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_code_flow.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_message,
_property_bag,
_thread_flow,
)
@dataclasses.dataclass
class CodeFlow(object):
"""A set of threadFlows which together describe a pattern of code execution relevant to detecting a result."""
thread_flows: List[_thread_flow.ThreadFlow] = dataclasses.field(
metadata={"schema_property_name": "threadFlows"}
)
message: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
===============================================================================================================================================================
SOURCE CODE FILE: _configuration_override.py
LINES: 1
SIZE: 1.01 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_configuration_override.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_property_bag,
_reporting_configuration,
_reporting_descriptor_reference,
)
@dataclasses.dataclass
class ConfigurationOverride(object):
"""Information about how a specific rule or notification was reconfigured at runtime."""
configuration: _reporting_configuration.ReportingConfiguration = dataclasses.field(
metadata={"schema_property_name": "configuration"}
)
descriptor: _reporting_descriptor_reference.ReportingDescriptorReference = (
dataclasses.field(metadata={"schema_property_name": "descriptor"})
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
===================================================================================================================================================
SOURCE CODE FILE: _conversion.py
LINES: 1
SIZE: 1.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_conversion.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_invocation,
_property_bag,
_tool,
)
@dataclasses.dataclass
class Conversion(object):
"""Describes how a converter transformed the output of a static analysis tool from the analysis tool's native output format into the SARIF format."""
tool: _tool.Tool = dataclasses.field(metadata={"schema_property_name": "tool"})
analysis_tool_log_files: Optional[List[_artifact_location.ArtifactLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "analysisToolLogFiles"}
)
)
invocation: Optional[_invocation.Invocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "invocation"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
=============================================================================================================================================
SOURCE CODE FILE: _edge.py
LINES: 1
SIZE: 0.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_edge.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
@dataclasses.dataclass
class Edge(object):
"""Represents a directed edge in a graph."""
id: str = dataclasses.field(metadata={"schema_property_name": "id"})
source_node_id: str = dataclasses.field(
metadata={"schema_property_name": "sourceNodeId"}
)
target_node_id: str = dataclasses.field(
metadata={"schema_property_name": "targetNodeId"}
)
label: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "label"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
=======================================================================================================================================================
SOURCE CODE FILE: _edge_traversal.py
LINES: 1
SIZE: 1.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_edge_traversal.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
@dataclasses.dataclass
class EdgeTraversal(object):
"""Represents the traversal of a single edge during a graph traversal."""
edge_id: str = dataclasses.field(metadata={"schema_property_name": "edgeId"})
final_state: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "finalState"}
)
message: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
step_over_edge_count: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "stepOverEdgeCount"}
)
# flake8: noqa
```
==================================================================================================================================================
SOURCE CODE FILE: _exception.py
LINES: 1
SIZE: 1.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_exception.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_exception,
_property_bag,
_stack,
)
@dataclasses.dataclass
class Exception(object):
"""Describes a runtime exception encountered during the execution of an analysis tool."""
inner_exceptions: Optional[List[_exception.Exception]] = dataclasses.field(
default=None, metadata={"schema_property_name": "innerExceptions"}
)
kind: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "kind"}
)
message: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
stack: Optional[_stack.Stack] = dataclasses.field(
default=None, metadata={"schema_property_name": "stack"}
)
# flake8: noqa
```
============================================================================================================================================================
SOURCE CODE FILE: _external_properties.py
LINES: 1
SIZE: 3.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_external_properties.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_address,
_artifact,
_conversion,
_graph,
_invocation,
_logical_location,
_property_bag,
_result,
_thread_flow_location,
_tool_component,
_web_request,
_web_response,
)
@dataclasses.dataclass
class ExternalProperties(object):
"""The top-level element of an external property file."""
addresses: Optional[List[_address.Address]] = dataclasses.field(
default=None, metadata={"schema_property_name": "addresses"}
)
artifacts: Optional[List[_artifact.Artifact]] = dataclasses.field(
default=None, metadata={"schema_property_name": "artifacts"}
)
conversion: Optional[_conversion.Conversion] = dataclasses.field(
default=None, metadata={"schema_property_name": "conversion"}
)
driver: Optional[_tool_component.ToolComponent] = dataclasses.field(
default=None, metadata={"schema_property_name": "driver"}
)
extensions: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "extensions"}
)
externalized_properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "externalizedProperties"}
)
graphs: Optional[List[_graph.Graph]] = dataclasses.field(
default=None, metadata={"schema_property_name": "graphs"}
)
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
invocations: Optional[List[_invocation.Invocation]] = dataclasses.field(
default=None, metadata={"schema_property_name": "invocations"}
)
logical_locations: Optional[List[_logical_location.LogicalLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "logicalLocations"}
)
)
policies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "policies"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
results: Optional[List[_result.Result]] = dataclasses.field(
default=None, metadata={"schema_property_name": "results"}
)
run_guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "runGuid"}
)
schema: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "schema"}
)
taxonomies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "taxonomies"}
)
thread_flow_locations: Optional[List[_thread_flow_location.ThreadFlowLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "threadFlowLocations"}
)
)
translations: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "translations"}
)
version: Optional[Literal["2.1.0"]] = dataclasses.field(
default=None, metadata={"schema_property_name": "version"}
)
web_requests: Optional[List[_web_request.WebRequest]] = dataclasses.field(
default=None, metadata={"schema_property_name": "webRequests"}
)
web_responses: Optional[List[_web_response.WebResponse]] = dataclasses.field(
default=None, metadata={"schema_property_name": "webResponses"}
)
# flake8: noqa
```
=========================================================================================================================================================================
SOURCE CODE FILE: _external_property_file_reference.py
LINES: 1
SIZE: 1.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_external_property_file_reference.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_property_bag,
)
@dataclasses.dataclass
class ExternalPropertyFileReference(object):
"""Contains information that enables a SARIF consumer to locate the external property file that contains the value of an externalized property associated with the run."""
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
item_count: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "itemCount"}
)
location: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "location"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
==========================================================================================================================================================================
SOURCE CODE FILE: _external_property_file_references.py
LINES: 1
SIZE: 3.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_external_property_file_references.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_external_property_file_reference,
_property_bag,
)
@dataclasses.dataclass
class ExternalPropertyFileReferences(object):
"""References to external property files that should be inlined with the content of a root log file."""
addresses: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "addresses"})
artifacts: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "artifacts"})
conversion: Optional[
_external_property_file_reference.ExternalPropertyFileReference
] = dataclasses.field(default=None, metadata={"schema_property_name": "conversion"})
driver: Optional[
_external_property_file_reference.ExternalPropertyFileReference
] = dataclasses.field(default=None, metadata={"schema_property_name": "driver"})
extensions: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "extensions"})
externalized_properties: Optional[
_external_property_file_reference.ExternalPropertyFileReference
] = dataclasses.field(
default=None, metadata={"schema_property_name": "externalizedProperties"}
)
graphs: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "graphs"})
invocations: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "invocations"}
)
logical_locations: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "logicalLocations"}
)
policies: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "policies"})
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
results: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "results"})
taxonomies: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "taxonomies"})
thread_flow_locations: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "threadFlowLocations"}
)
translations: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "translations"}
)
web_requests: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "webRequests"}
)
web_responses: Optional[
List[_external_property_file_reference.ExternalPropertyFileReference]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "webResponses"}
)
# flake8: noqa
```
============================================================================================================================================
SOURCE CODE FILE: _fix.py
LINES: 1
SIZE: 1.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_fix.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_change,
_message,
_property_bag,
)
@dataclasses.dataclass
class Fix(object):
"""A proposed fix for the problem represented by a result object. A fix specifies a set of artifacts to modify. For each artifact, it specifies a set of bytes to remove, and provides a set of new bytes to replace them."""
artifact_changes: List[_artifact_change.ArtifactChange] = dataclasses.field(
metadata={"schema_property_name": "artifactChanges"}
)
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
==============================================================================================================================================
SOURCE CODE FILE: _graph.py
LINES: 1
SIZE: 1.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_graph.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_edge,
_message,
_node,
_property_bag,
)
@dataclasses.dataclass
class Graph(object):
"""A network of nodes and directed edges that describes some aspect of the structure of the code (for example, a call graph)."""
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
edges: Optional[List[_edge.Edge]] = dataclasses.field(
default=None, metadata={"schema_property_name": "edges"}
)
nodes: Optional[List[_node.Node]] = dataclasses.field(
default=None, metadata={"schema_property_name": "nodes"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
========================================================================================================================================================
SOURCE CODE FILE: _graph_traversal.py
LINES: 1
SIZE: 1.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_graph_traversal.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_edge_traversal,
_message,
_property_bag,
)
@dataclasses.dataclass
class GraphTraversal(object):
"""Represents a path through a graph."""
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
edge_traversals: Optional[List[_edge_traversal.EdgeTraversal]] = dataclasses.field(
default=None, metadata={"schema_property_name": "edgeTraversals"}
)
immutable_state: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "immutableState"}
)
initial_state: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "initialState"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
result_graph_index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "resultGraphIndex"}
)
run_graph_index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "runGraphIndex"}
)
# flake8: noqa
```
===================================================================================================================================================
SOURCE CODE FILE: _invocation.py
LINES: 1
SIZE: 4.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_invocation.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_configuration_override,
_notification,
_property_bag,
)
@dataclasses.dataclass
class Invocation(object):
"""The runtime environment of the analysis tool run."""
execution_successful: bool = dataclasses.field(
metadata={"schema_property_name": "executionSuccessful"}
)
account: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "account"}
)
arguments: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "arguments"}
)
command_line: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "commandLine"}
)
end_time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "endTimeUtc"}
)
environment_variables: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "environmentVariables"}
)
executable_location: Optional[_artifact_location.ArtifactLocation] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "executableLocation"}
)
)
exit_code: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "exitCode"}
)
exit_code_description: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "exitCodeDescription"}
)
exit_signal_name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "exitSignalName"}
)
exit_signal_number: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "exitSignalNumber"}
)
machine: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "machine"}
)
notification_configuration_overrides: Optional[
List[_configuration_override.ConfigurationOverride]
] = dataclasses.field(
default=None,
metadata={"schema_property_name": "notificationConfigurationOverrides"},
)
process_id: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "processId"}
)
process_start_failure_message: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "processStartFailureMessage"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
response_files: Optional[List[_artifact_location.ArtifactLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "responseFiles"}
)
)
rule_configuration_overrides: Optional[
List[_configuration_override.ConfigurationOverride]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "ruleConfigurationOverrides"}
)
start_time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "startTimeUtc"}
)
stderr: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "stderr"}
)
stdin: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "stdin"}
)
stdout: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "stdout"}
)
stdout_stderr: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "stdoutStderr"}
)
tool_configuration_notifications: Optional[List[_notification.Notification]] = (
dataclasses.field(
default=None,
metadata={"schema_property_name": "toolConfigurationNotifications"},
)
)
tool_execution_notifications: Optional[List[_notification.Notification]] = (
dataclasses.field(
default=None,
metadata={"schema_property_name": "toolExecutionNotifications"},
)
)
working_directory: Optional[_artifact_location.ArtifactLocation] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "workingDirectory"}
)
)
# flake8: noqa
```
=================================================================================================================================================
SOURCE CODE FILE: _location.py
LINES: 1
SIZE: 1.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_location.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_location_relationship,
_logical_location,
_message,
_physical_location,
_property_bag,
_region,
)
@dataclasses.dataclass
class Location(object):
"""A location within a programming artifact."""
annotations: Optional[List[_region.Region]] = dataclasses.field(
default=None, metadata={"schema_property_name": "annotations"}
)
id: int = dataclasses.field(default=-1, metadata={"schema_property_name": "id"})
logical_locations: Optional[List[_logical_location.LogicalLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "logicalLocations"}
)
)
message: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
physical_location: Optional[_physical_location.PhysicalLocation] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "physicalLocation"}
)
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
relationships: Optional[List[_location_relationship.LocationRelationship]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "relationships"}
)
)
# flake8: noqa
```
==============================================================================================================================================================
SOURCE CODE FILE: _location_relationship.py
LINES: 1
SIZE: 0.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_location_relationship.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
@dataclasses.dataclass
class LocationRelationship(object):
"""Information about the relation of one location to another."""
target: int = dataclasses.field(metadata={"schema_property_name": "target"})
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
kinds: List[str] = dataclasses.field(
default_factory=lambda: ["relevant"], metadata={"schema_property_name": "kinds"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
=========================================================================================================================================================
SOURCE CODE FILE: _logical_location.py
LINES: 1
SIZE: 1.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_logical_location.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
@dataclasses.dataclass
class LogicalLocation(object):
"""A logical location of a construct that produced a result."""
decorated_name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "decoratedName"}
)
fully_qualified_name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "fullyQualifiedName"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
kind: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "kind"}
)
name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "name"}
)
parent_index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "parentIndex"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
================================================================================================================================================
SOURCE CODE FILE: _message.py
LINES: 1
SIZE: 1.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_message.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
@dataclasses.dataclass
class Message(object):
"""Encapsulates a message intended to be read by the end user."""
arguments: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "arguments"}
)
id: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "id"}
)
markdown: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "markdown"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
text: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "text"}
)
# flake8: noqa
```
===================================================================================================================================================================
SOURCE CODE FILE: _multiformat_message_string.py
LINES: 1
SIZE: 0.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_multiformat_message_string.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
@dataclasses.dataclass
class MultiformatMessageString(object):
"""A message string or message format string rendered in multiple formats."""
text: str = dataclasses.field(metadata={"schema_property_name": "text"})
markdown: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "markdown"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
=============================================================================================================================================
SOURCE CODE FILE: _node.py
LINES: 1
SIZE: 1.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_node.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_location,
_message,
_node,
_property_bag,
)
@dataclasses.dataclass
class Node(object):
"""Represents a node in a graph."""
id: str = dataclasses.field(metadata={"schema_property_name": "id"})
children: Optional[List[_node.Node]] = dataclasses.field(
default=None, metadata={"schema_property_name": "children"}
)
label: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "label"}
)
location: Optional[_location.Location] = dataclasses.field(
default=None, metadata={"schema_property_name": "location"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
=====================================================================================================================================================
SOURCE CODE FILE: _notification.py
LINES: 1
SIZE: 1.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_notification.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_exception,
_location,
_message,
_property_bag,
_reporting_descriptor_reference,
)
@dataclasses.dataclass
class Notification(object):
"""Describes a condition relevant to the tool itself, as opposed to being relevant to a target being analyzed by the tool."""
message: _message.Message = dataclasses.field(
metadata={"schema_property_name": "message"}
)
associated_rule: Optional[
_reporting_descriptor_reference.ReportingDescriptorReference
] = dataclasses.field(
default=None, metadata={"schema_property_name": "associatedRule"}
)
descriptor: Optional[
_reporting_descriptor_reference.ReportingDescriptorReference
] = dataclasses.field(default=None, metadata={"schema_property_name": "descriptor"})
exception: Optional[_exception.Exception] = dataclasses.field(
default=None, metadata={"schema_property_name": "exception"}
)
level: Literal["none", "note", "warning", "error"] = dataclasses.field(
default="warning", metadata={"schema_property_name": "level"}
)
locations: Optional[List[_location.Location]] = dataclasses.field(
default=None, metadata={"schema_property_name": "locations"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
thread_id: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "threadId"}
)
time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "timeUtc"}
)
# flake8: noqa
```
|
==========================================================================================================================================================
SOURCE CODE FILE: _physical_location.py
LINES: 1
SIZE: 1.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_physical_location.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_address,
_artifact_location,
_property_bag,
_region,
)
@dataclasses.dataclass
class PhysicalLocation(object):
"""A physical location relevant to a result. Specifies a reference to a programming artifact together with a range of bytes or characters within that artifact."""
address: Optional[_address.Address] = dataclasses.field(
default=None, metadata={"schema_property_name": "address"}
)
artifact_location: Optional[_artifact_location.ArtifactLocation] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "artifactLocation"}
)
)
context_region: Optional[_region.Region] = dataclasses.field(
default=None, metadata={"schema_property_name": "contextRegion"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
region: Optional[_region.Region] = dataclasses.field(
default=None, metadata={"schema_property_name": "region"}
)
# flake8: noqa
```
|
=====================================================================================================================================================
SOURCE CODE FILE: _property_bag.py
LINES: 1
SIZE: 0.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_property_bag.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
@dataclasses.dataclass
class PropertyBag(object):
"""Key/value pairs that provide additional information about the object."""
tags: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "tags"}
)
# flake8: noqa
```
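A minimal usage sketch (not part of the generated file): construction is plain dataclass usage, and the `schema_property_name` metadata on each field records the camelCase SARIF property name a serializer can emit.

```py
# Illustrative only: a PropertyBag carrying tags.
import dataclasses

from torch.onnx._internal.diagnostics.infra.sarif import _property_bag

bag = _property_bag.PropertyBag(tags=["onnx", "diagnostics"])
# The SARIF schema name is recoverable from the field metadata.
field = dataclasses.fields(bag)[0]
print(field.metadata["schema_property_name"])  # -> "tags"
```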
|
==================================================================================================================================================
SOURCE CODE FILE: _rectangle.py
LINES: 1
SIZE: 1.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_rectangle.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
@dataclasses.dataclass
class Rectangle(object):
"""An area within an image."""
bottom: Optional[float] = dataclasses.field(
default=None, metadata={"schema_property_name": "bottom"}
)
left: Optional[float] = dataclasses.field(
default=None, metadata={"schema_property_name": "left"}
)
message: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
right: Optional[float] = dataclasses.field(
default=None, metadata={"schema_property_name": "right"}
)
top: Optional[float] = dataclasses.field(
default=None, metadata={"schema_property_name": "top"}
)
# flake8: noqa
```
|
===============================================================================================================================================
SOURCE CODE FILE: _region.py
LINES: 1
SIZE: 2.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_region.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_content,
_message,
_property_bag,
)
@dataclasses.dataclass
class Region(object):
"""A region within an artifact where a result was detected."""
byte_length: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "byteLength"}
)
byte_offset: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "byteOffset"}
)
char_length: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "charLength"}
)
char_offset: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "charOffset"}
)
end_column: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "endColumn"}
)
end_line: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "endLine"}
)
message: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
snippet: Optional[_artifact_content.ArtifactContent] = dataclasses.field(
default=None, metadata={"schema_property_name": "snippet"}
)
source_language: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "sourceLanguage"}
)
start_column: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "startColumn"}
)
start_line: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "startLine"}
)
# flake8: noqa
```
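A minimal usage sketch (not part of the generated file), assuming `_artifact_location.ArtifactLocation` accepts a `uri` keyword like the other generated modules in this package:

```py
# Illustrative only: a physical location pointing at a line range.
from torch.onnx._internal.diagnostics.infra.sarif import (
    _artifact_location,  # assumed to expose ArtifactLocation(uri=...)
    _physical_location,
    _region,
)

location = _physical_location.PhysicalLocation(
    artifact_location=_artifact_location.ArtifactLocation(uri="model.py"),
    region=_region.Region(start_line=10, start_column=1, end_line=12),
)
print(location.region.start_line)  # -> 10
```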
|
====================================================================================================================================================
SOURCE CODE FILE: _replacement.py
LINES: 1
SIZE: 0.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_replacement.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_content,
_property_bag,
_region,
)
@dataclasses.dataclass
class Replacement(object):
"""The replacement of a single region of an artifact."""
deleted_region: _region.Region = dataclasses.field(
metadata={"schema_property_name": "deletedRegion"}
)
inserted_content: Optional[_artifact_content.ArtifactContent] = dataclasses.field(
default=None, metadata={"schema_property_name": "insertedContent"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
================================================================================================================================================================
SOURCE CODE FILE: _reporting_configuration.py
LINES: 1
SIZE: 1.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_reporting_configuration.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
@dataclasses.dataclass
class ReportingConfiguration(object):
"""Information about a rule or notification that can be configured at runtime."""
enabled: bool = dataclasses.field(
default=True, metadata={"schema_property_name": "enabled"}
)
level: Literal["none", "note", "warning", "error"] = dataclasses.field(
default="warning", metadata={"schema_property_name": "level"}
)
parameters: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "parameters"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
rank: float = dataclasses.field(
default=-1.0, metadata={"schema_property_name": "rank"}
)
# flake8: noqa
```
|
=============================================================================================================================================================
SOURCE CODE FILE: _reporting_descriptor.py
LINES: 1
SIZE: 2.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_reporting_descriptor.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_multiformat_message_string,
_property_bag,
_reporting_configuration,
_reporting_descriptor_relationship,
)
@dataclasses.dataclass
class ReportingDescriptor(object):
"""Metadata that describes a specific report produced by the tool, as part of the analysis it provides or its runtime reporting."""
id: str = dataclasses.field(metadata={"schema_property_name": "id"})
default_configuration: Optional[_reporting_configuration.ReportingConfiguration] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "defaultConfiguration"}
)
)
deprecated_guids: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "deprecatedGuids"}
)
deprecated_ids: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "deprecatedIds"}
)
deprecated_names: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "deprecatedNames"}
)
full_description: Optional[_multiformat_message_string.MultiformatMessageString] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "fullDescription"}
)
)
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
help: Optional[_multiformat_message_string.MultiformatMessageString] = (
dataclasses.field(default=None, metadata={"schema_property_name": "help"})
)
help_uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "helpUri"}
)
message_strings: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "messageStrings"}
)
name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "name"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
relationships: Optional[
List[_reporting_descriptor_relationship.ReportingDescriptorRelationship]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "relationships"}
)
short_description: Optional[
_multiformat_message_string.MultiformatMessageString
] = dataclasses.field(
default=None, metadata={"schema_property_name": "shortDescription"}
)
# flake8: noqa
```
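A minimal usage sketch (not part of the generated file); the rule id and name are made up for illustration:

```py
# Illustrative only: a rule descriptor with a short description and a
# default severity. "POE0001" is a hypothetical rule id, not a real
# torch.onnx rule.
from torch.onnx._internal.diagnostics.infra.sarif import (
    _multiformat_message_string,
    _reporting_configuration,
    _reporting_descriptor,
)

rule = _reporting_descriptor.ReportingDescriptor(
    id="POE0001",
    name="operator-not-supported",
    short_description=_multiformat_message_string.MultiformatMessageString(
        text="The operator has no registered ONNX counterpart."
    ),
    default_configuration=_reporting_configuration.ReportingConfiguration(
        level="error"
    ),
)
```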
|
=======================================================================================================================================================================
SOURCE CODE FILE: _reporting_descriptor_reference.py
LINES: 1
SIZE: 1.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_reporting_descriptor_reference.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_property_bag,
_tool_component_reference,
)
@dataclasses.dataclass
class ReportingDescriptorReference(object):
"""Information about how to locate a relevant reporting descriptor."""
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
id: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "id"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
tool_component: Optional[_tool_component_reference.ToolComponentReference] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "toolComponent"}
)
)
# flake8: noqa
```
|
==========================================================================================================================================================================
SOURCE CODE FILE: _reporting_descriptor_relationship.py
LINES: 1
SIZE: 1.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_reporting_descriptor_relationship.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_message,
_property_bag,
_reporting_descriptor_reference,
)
@dataclasses.dataclass
class ReportingDescriptorRelationship(object):
"""Information about the relation of one reporting descriptor to another."""
target: _reporting_descriptor_reference.ReportingDescriptorReference = (
dataclasses.field(metadata={"schema_property_name": "target"})
)
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
kinds: List[str] = dataclasses.field(
default_factory=lambda: ["relevant"], metadata={"schema_property_name": "kinds"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
===============================================================================================================================================
SOURCE CODE FILE: _result.py
LINES: 1
SIZE: 5.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_result.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_attachment,
_code_flow,
_fix,
_graph,
_graph_traversal,
_location,
_message,
_property_bag,
_reporting_descriptor_reference,
_result_provenance,
_stack,
_suppression,
_web_request,
_web_response,
)
@dataclasses.dataclass
class Result(object):
"""A result produced by an analysis tool."""
message: _message.Message = dataclasses.field(
metadata={"schema_property_name": "message"}
)
analysis_target: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "analysisTarget"}
)
attachments: Optional[List[_attachment.Attachment]] = dataclasses.field(
default=None, metadata={"schema_property_name": "attachments"}
)
baseline_state: Optional[Literal["new", "unchanged", "updated", "absent"]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "baselineState"}
)
)
code_flows: Optional[List[_code_flow.CodeFlow]] = dataclasses.field(
default=None, metadata={"schema_property_name": "codeFlows"}
)
correlation_guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "correlationGuid"}
)
fingerprints: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "fingerprints"}
)
fixes: Optional[List[_fix.Fix]] = dataclasses.field(
default=None, metadata={"schema_property_name": "fixes"}
)
graph_traversals: Optional[List[_graph_traversal.GraphTraversal]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "graphTraversals"}
)
)
graphs: Optional[List[_graph.Graph]] = dataclasses.field(
default=None, metadata={"schema_property_name": "graphs"}
)
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
hosted_viewer_uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "hostedViewerUri"}
)
kind: Literal[
"notApplicable", "pass", "fail", "review", "open", "informational"
] = dataclasses.field(default="fail", metadata={"schema_property_name": "kind"})
level: Literal["none", "note", "warning", "error"] = dataclasses.field(
default="warning", metadata={"schema_property_name": "level"}
)
locations: Optional[List[_location.Location]] = dataclasses.field(
default=None, metadata={"schema_property_name": "locations"}
)
occurrence_count: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "occurrenceCount"}
)
partial_fingerprints: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "partialFingerprints"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
provenance: Optional[_result_provenance.ResultProvenance] = dataclasses.field(
default=None, metadata={"schema_property_name": "provenance"}
)
rank: float = dataclasses.field(
default=-1.0, metadata={"schema_property_name": "rank"}
)
related_locations: Optional[List[_location.Location]] = dataclasses.field(
default=None, metadata={"schema_property_name": "relatedLocations"}
)
rule: Optional[_reporting_descriptor_reference.ReportingDescriptorReference] = (
dataclasses.field(default=None, metadata={"schema_property_name": "rule"})
)
rule_id: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "ruleId"}
)
rule_index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "ruleIndex"}
)
stacks: Optional[List[_stack.Stack]] = dataclasses.field(
default=None, metadata={"schema_property_name": "stacks"}
)
suppressions: Optional[List[_suppression.Suppression]] = dataclasses.field(
default=None, metadata={"schema_property_name": "suppressions"}
)
taxa: Optional[
List[_reporting_descriptor_reference.ReportingDescriptorReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "taxa"})
web_request: Optional[_web_request.WebRequest] = dataclasses.field(
default=None, metadata={"schema_property_name": "webRequest"}
)
web_response: Optional[_web_response.WebResponse] = dataclasses.field(
default=None, metadata={"schema_property_name": "webResponse"}
)
work_item_uris: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "workItemUris"}
)
# flake8: noqa
```
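A minimal usage sketch (not part of the generated file), assuming `_message.Message` (a sibling generated module not shown here) accepts a `text` keyword; the rule id is illustrative:

```py
# Illustrative only: the smallest meaningful Result.
from torch.onnx._internal.diagnostics.infra.sarif import _message, _result

result = _result.Result(
    message=_message.Message(text="Unsupported operator encountered."),  # assumed keyword
    rule_id="POE0001",  # hypothetical rule id
    level="error",
)
```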
|
==========================================================================================================================================================
SOURCE CODE FILE: _result_provenance.py
LINES: 1
SIZE: 1.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_result_provenance.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_physical_location,
_property_bag,
)
@dataclasses.dataclass
class ResultProvenance(object):
"""Contains information about how and when a result was detected."""
conversion_sources: Optional[List[_physical_location.PhysicalLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "conversionSources"}
)
)
first_detection_run_guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "firstDetectionRunGuid"}
)
first_detection_time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "firstDetectionTimeUtc"}
)
invocation_index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "invocationIndex"}
)
last_detection_run_guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "lastDetectionRunGuid"}
)
last_detection_time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "lastDetectionTimeUtc"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
============================================================================================================================================
SOURCE CODE FILE: _run.py
LINES: 3
SIZE: 5.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_run.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_address,
_artifact,
_conversion,
_external_property_file_references,
_graph,
_invocation,
_logical_location,
_property_bag,
_result,
_run_automation_details,
_special_locations,
_thread_flow_location,
_tool,
_tool_component,
_version_control_details,
_web_request,
_web_response,
)
@dataclasses.dataclass
class Run(object):
"""Describes a single run of an analysis tool, and contains the reported output of that run."""
tool: _tool.Tool = dataclasses.field(metadata={"schema_property_name": "tool"})
addresses: Optional[List[_address.Address]] = dataclasses.field(
default=None, metadata={"schema_property_name": "addresses"}
)
artifacts: Optional[List[_artifact.Artifact]] = dataclasses.field(
default=None, metadata={"schema_property_name": "artifacts"}
)
automation_details: Optional[_run_automation_details.RunAutomationDetails] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "automationDetails"}
)
)
baseline_guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "baselineGuid"}
)
column_kind: Optional[Literal["utf16CodeUnits", "unicodeCodePoints"]] = (
dataclasses.field(default=None, metadata={"schema_property_name": "columnKind"})
)
conversion: Optional[_conversion.Conversion] = dataclasses.field(
default=None, metadata={"schema_property_name": "conversion"}
)
default_encoding: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "defaultEncoding"}
)
default_source_language: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "defaultSourceLanguage"}
)
external_property_file_references: Optional[
_external_property_file_references.ExternalPropertyFileReferences
] = dataclasses.field(
default=None,
metadata={"schema_property_name": "externalPropertyFileReferences"},
)
graphs: Optional[List[_graph.Graph]] = dataclasses.field(
default=None, metadata={"schema_property_name": "graphs"}
)
invocations: Optional[List[_invocation.Invocation]] = dataclasses.field(
default=None, metadata={"schema_property_name": "invocations"}
)
language: str = dataclasses.field(
default="en-US", metadata={"schema_property_name": "language"}
)
logical_locations: Optional[List[_logical_location.LogicalLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "logicalLocations"}
)
)
newline_sequences: List[str] = dataclasses.field(
default_factory=lambda: ["\r\n", "\n"],
metadata={"schema_property_name": "newlineSequences"},
)
original_uri_base_ids: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "originalUriBaseIds"}
)
policies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "policies"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
redaction_tokens: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "redactionTokens"}
)
results: Optional[List[_result.Result]] = dataclasses.field(
default=None, metadata={"schema_property_name": "results"}
)
run_aggregates: Optional[List[_run_automation_details.RunAutomationDetails]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "runAggregates"}
)
)
special_locations: Optional[_special_locations.SpecialLocations] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "specialLocations"}
)
)
taxonomies: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "taxonomies"}
)
thread_flow_locations: Optional[List[_thread_flow_location.ThreadFlowLocation]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "threadFlowLocations"}
)
)
translations: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "translations"}
)
version_control_provenance: Optional[
List[_version_control_details.VersionControlDetails]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "versionControlProvenance"}
)
web_requests: Optional[List[_web_request.WebRequest]] = dataclasses.field(
default=None, metadata={"schema_property_name": "webRequests"}
)
web_responses: Optional[List[_web_response.WebResponse]] = dataclasses.field(
default=None, metadata={"schema_property_name": "webResponses"}
)
# flake8: noqa
```
|
===============================================================================================================================================================
SOURCE CODE FILE: _run_automation_details.py
LINES: 1
SIZE: 1.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_run_automation_details.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _message, _property_bag
@dataclasses.dataclass
class RunAutomationDetails(object):
"""Information that describes a run's identity and role within an engineering system process."""
correlation_guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "correlationGuid"}
)
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
id: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "id"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
==================================================================================================================================================
SOURCE CODE FILE: _sarif_log.py
LINES: 1
SIZE: 1.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_sarif_log.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_external_properties,
_property_bag,
_run,
)
@dataclasses.dataclass
class SarifLog(object):
"""Static Analysis Results Format (SARIF) Version 2.1.0 JSON Schema: a standard format for the output of static analysis tools."""
runs: List[_run.Run] = dataclasses.field(metadata={"schema_property_name": "runs"})
version: Literal["2.1.0"] = dataclasses.field(
metadata={"schema_property_name": "version"}
)
schema_uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "$schema"}
)
inline_external_properties: Optional[
List[_external_properties.ExternalProperties]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "inlineExternalProperties"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
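A minimal usage sketch (not part of the generated file) assembling the full object tree, `ToolComponent` to `Tool` to `Run` to `SarifLog`, with the version pinned to the `Literal["2.1.0"]` the schema requires (see `version.py` below):

```py
# Illustrative only: an empty but schema-complete log.
from torch.onnx._internal.diagnostics.infra.sarif import (
    _run,
    _sarif_log,
    _tool,
    _tool_component,
    version,
)

log = _sarif_log.SarifLog(
    version=version.SARIF_VERSION,  # "2.1.0"
    schema_uri=version.SARIF_SCHEMA_LINK,
    runs=[
        _run.Run(
            tool=_tool.Tool(
                driver=_tool_component.ToolComponent(name="torch.onnx")
            ),
            results=[],
        )
    ],
)
```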
|
==========================================================================================================================================================
SOURCE CODE FILE: _special_locations.py
LINES: 1
SIZE: 0.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_special_locations.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_property_bag,
)
@dataclasses.dataclass
class SpecialLocations(object):
"""Defines locations of special significance to SARIF consumers."""
display_base: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "displayBase"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
==============================================================================================================================================
SOURCE CODE FILE: _stack.py
LINES: 1
SIZE: 0.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_stack.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_message,
_property_bag,
_stack_frame,
)
@dataclasses.dataclass
class Stack(object):
"""A call stack that is relevant to a result."""
frames: List[_stack_frame.StackFrame] = dataclasses.field(
metadata={"schema_property_name": "frames"}
)
message: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
====================================================================================================================================================
SOURCE CODE FILE: _stack_frame.py
LINES: 1
SIZE: 1.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_stack_frame.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _location, _property_bag
@dataclasses.dataclass
class StackFrame(object):
"""A function call within a stack trace."""
location: Optional[_location.Location] = dataclasses.field(
default=None, metadata={"schema_property_name": "location"}
)
module: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "module"}
)
parameters: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "parameters"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
thread_id: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "threadId"}
)
# flake8: noqa
```
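A minimal usage sketch (not part of the generated file): `frames` is the only required field on `Stack`, and every `StackFrame` field is optional.

```py
# Illustrative only: a two-frame stack.
from torch.onnx._internal.diagnostics.infra.sarif import _stack, _stack_frame

stack = _stack.Stack(
    frames=[
        _stack_frame.StackFrame(module="torch.onnx", thread_id=1),
        _stack_frame.StackFrame(module="torch.fx", thread_id=1),
    ]
)
print(len(stack.frames))  # -> 2
```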
|
====================================================================================================================================================
SOURCE CODE FILE: _suppression.py
LINES: 1
SIZE: 1.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_suppression.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _location, _property_bag
@dataclasses.dataclass
class Suppression(object):
"""A suppression that is relevant to a result."""
kind: Literal["inSource", "external"] = dataclasses.field(
metadata={"schema_property_name": "kind"}
)
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
justification: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "justification"}
)
location: Optional[_location.Location] = dataclasses.field(
default=None, metadata={"schema_property_name": "location"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
state: Optional[Literal["accepted", "underReview", "rejected"]] = dataclasses.field(
default=None, metadata={"schema_property_name": "state"}
)
# flake8: noqa
```
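A minimal usage sketch (not part of the generated file): `kind` is required and constrained to `"inSource"` or `"external"`.

```py
# Illustrative only: suppressing a result in source.
from torch.onnx._internal.diagnostics.infra.sarif import _suppression

suppression = _suppression.Suppression(
    kind="inSource",
    justification="Known false positive for this model.",
)
```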
|
====================================================================================================================================================
SOURCE CODE FILE: _thread_flow.py
LINES: 1
SIZE: 1.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_thread_flow.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_message,
_property_bag,
_thread_flow_location,
)
@dataclasses.dataclass
class ThreadFlow(object):
"""Describes a sequence of code locations that specify a path through a single thread of execution such as an operating system or fiber."""
locations: List[_thread_flow_location.ThreadFlowLocation] = dataclasses.field(
metadata={"schema_property_name": "locations"}
)
id: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "id"}
)
immutable_state: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "immutableState"}
)
initial_state: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "initialState"}
)
message: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "message"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
=============================================================================================================================================================
SOURCE CODE FILE: _thread_flow_location.py
LINES: 1
SIZE: 2.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_thread_flow_location.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_location,
_property_bag,
_reporting_descriptor_reference,
_stack,
_web_request,
_web_response,
)
@dataclasses.dataclass
class ThreadFlowLocation(object):
"""A location visited by an analysis tool while simulating or monitoring the execution of a program."""
execution_order: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "executionOrder"}
)
execution_time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "executionTimeUtc"}
)
importance: Literal["important", "essential", "unimportant"] = dataclasses.field(
default="important", metadata={"schema_property_name": "importance"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
kinds: Optional[List[str]] = dataclasses.field(
default=None, metadata={"schema_property_name": "kinds"}
)
location: Optional[_location.Location] = dataclasses.field(
default=None, metadata={"schema_property_name": "location"}
)
module: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "module"}
)
nesting_level: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "nestingLevel"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
stack: Optional[_stack.Stack] = dataclasses.field(
default=None, metadata={"schema_property_name": "stack"}
)
state: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "state"}
)
taxa: Optional[
List[_reporting_descriptor_reference.ReportingDescriptorReference]
] = dataclasses.field(default=None, metadata={"schema_property_name": "taxa"})
web_request: Optional[_web_request.WebRequest] = dataclasses.field(
default=None, metadata={"schema_property_name": "webRequest"}
)
web_response: Optional[_web_response.WebResponse] = dataclasses.field(
default=None, metadata={"schema_property_name": "webResponse"}
)
# flake8: noqa
```
|
=============================================================================================================================================
SOURCE CODE FILE: _tool.py
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_tool.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _property_bag, _tool_component
@dataclasses.dataclass
class Tool(object):
"""The analysis tool that was run."""
driver: _tool_component.ToolComponent = dataclasses.field(
metadata={"schema_property_name": "driver"}
)
extensions: Optional[List[_tool_component.ToolComponent]] = dataclasses.field(
default=None, metadata={"schema_property_name": "extensions"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
=======================================================================================================================================================
SOURCE CODE FILE: _tool_component.py
LINES: 1
SIZE: 5.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_tool_component.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, List, Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_multiformat_message_string,
_property_bag,
_reporting_descriptor,
_tool_component_reference,
_translation_metadata,
)
@dataclasses.dataclass
class ToolComponent(object):
"""A component, such as a plug-in or the driver, of the analysis tool that was run."""
name: str = dataclasses.field(metadata={"schema_property_name": "name"})
associated_component: Optional[_tool_component_reference.ToolComponentReference] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "associatedComponent"}
)
)
contents: List[Literal["localizedData", "nonLocalizedData"]] = dataclasses.field(
default_factory=lambda: ["localizedData", "nonLocalizedData"],
metadata={"schema_property_name": "contents"},
)
dotted_quad_file_version: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "dottedQuadFileVersion"}
)
download_uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "downloadUri"}
)
full_description: Optional[_multiformat_message_string.MultiformatMessageString] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "fullDescription"}
)
)
full_name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "fullName"}
)
global_message_strings: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "globalMessageStrings"}
)
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
information_uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "informationUri"}
)
is_comprehensive: Optional[bool] = dataclasses.field(
default=None, metadata={"schema_property_name": "isComprehensive"}
)
language: str = dataclasses.field(
default="en-US", metadata={"schema_property_name": "language"}
)
localized_data_semantic_version: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "localizedDataSemanticVersion"}
)
locations: Optional[List[_artifact_location.ArtifactLocation]] = dataclasses.field(
default=None, metadata={"schema_property_name": "locations"}
)
minimum_required_localized_data_semantic_version: Optional[str] = dataclasses.field(
default=None,
metadata={
"schema_property_name": "minimumRequiredLocalizedDataSemanticVersion"
},
)
notifications: Optional[List[_reporting_descriptor.ReportingDescriptor]] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "notifications"}
)
)
organization: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "organization"}
)
product: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "product"}
)
product_suite: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "productSuite"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
release_date_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "releaseDateUtc"}
)
rules: Optional[List[_reporting_descriptor.ReportingDescriptor]] = (
dataclasses.field(default=None, metadata={"schema_property_name": "rules"})
)
semantic_version: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "semanticVersion"}
)
short_description: Optional[
_multiformat_message_string.MultiformatMessageString
] = dataclasses.field(
default=None, metadata={"schema_property_name": "shortDescription"}
)
supported_taxonomies: Optional[
List[_tool_component_reference.ToolComponentReference]
] = dataclasses.field(
default=None, metadata={"schema_property_name": "supportedTaxonomies"}
)
taxa: Optional[List[_reporting_descriptor.ReportingDescriptor]] = dataclasses.field(
default=None, metadata={"schema_property_name": "taxa"}
)
translation_metadata: Optional[_translation_metadata.TranslationMetadata] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "translationMetadata"}
)
)
version: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "version"}
)
# flake8: noqa
```
|
=================================================================================================================================================================
SOURCE CODE FILE: _tool_component_reference.py
LINES: 1
SIZE: 0.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_tool_component_reference.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import _property_bag
@dataclasses.dataclass
class ToolComponentReference(object):
"""Identifies a particular toolComponent object, either the driver or an extension."""
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "name"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
```
|
=============================================================================================================================================================
SOURCE CODE FILE: _translation_metadata.py
LINES: 1
SIZE: 1.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_translation_metadata.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_multiformat_message_string,
_property_bag,
)
@dataclasses.dataclass
class TranslationMetadata(object):
"""Provides additional metadata related to translation."""
name: str = dataclasses.field(metadata={"schema_property_name": "name"})
download_uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "downloadUri"}
)
full_description: Optional[_multiformat_message_string.MultiformatMessageString] = (
dataclasses.field(
default=None, metadata={"schema_property_name": "fullDescription"}
)
)
full_name: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "fullName"}
)
information_uri: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "informationUri"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
short_description: Optional[
_multiformat_message_string.MultiformatMessageString
] = dataclasses.field(
default=None, metadata={"schema_property_name": "shortDescription"}
)
# flake8: noqa
```
|
================================================================================================================================================================
SOURCE CODE FILE: _version_control_details.py
LINES: 1
SIZE: 1.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_version_control_details.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_location,
_property_bag,
)
@dataclasses.dataclass
class VersionControlDetails(object):
"""Specifies the information necessary to retrieve a desired revision from a version control system."""
repository_uri: str = dataclasses.field(
metadata={"schema_property_name": "repositoryUri"}
)
as_of_time_utc: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "asOfTimeUtc"}
)
branch: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "branch"}
)
mapped_to: Optional[_artifact_location.ArtifactLocation] = dataclasses.field(
default=None, metadata={"schema_property_name": "mappedTo"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
revision_id: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "revisionId"}
)
revision_tag: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "revisionTag"}
)
# flake8: noqa
```
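A minimal usage sketch (not part of the generated file); the revision id is illustrative:

```py
# Illustrative only: recording the code revision a run analyzed.
from torch.onnx._internal.diagnostics.infra.sarif import _version_control_details

provenance = _version_control_details.VersionControlDetails(
    repository_uri="https://github.com/pytorch/pytorch",
    revision_id="abc1234",  # hypothetical commit hash
)
```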
|
====================================================================================================================================================
SOURCE CODE FILE: _web_request.py
LINES: 1
SIZE: 1.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_web_request.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_content,
_property_bag,
)
@dataclasses.dataclass
class WebRequest(object):
"""Describes an HTTP request."""
body: Optional[_artifact_content.ArtifactContent] = dataclasses.field(
default=None, metadata={"schema_property_name": "body"}
)
headers: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "headers"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
method: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "method"}
)
parameters: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "parameters"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
protocol: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "protocol"}
)
target: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "target"}
)
version: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "version"}
)
# flake8: noqa
```
|
=====================================================================================================================================================
SOURCE CODE FILE: _web_response.py
LINES: 1
SIZE: 1.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\_web_response.py
ENCODING: utf-8
```py
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Any, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_content,
_property_bag,
)
@dataclasses.dataclass
class WebResponse(object):
"""Describes the response to an HTTP request."""
body: Optional[_artifact_content.ArtifactContent] = dataclasses.field(
default=None, metadata={"schema_property_name": "body"}
)
headers: Any = dataclasses.field(
default=None, metadata={"schema_property_name": "headers"}
)
index: int = dataclasses.field(
default=-1, metadata={"schema_property_name": "index"}
)
no_response_received: Optional[bool] = dataclasses.field(
default=None, metadata={"schema_property_name": "noResponseReceived"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
protocol: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "protocol"}
)
reason_phrase: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "reasonPhrase"}
)
status_code: Optional[int] = dataclasses.field(
default=None, metadata={"schema_property_name": "statusCode"}
)
version: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "version"}
)
# flake8: noqa
```
|
===============================================================================================================================================
SOURCE CODE FILE: version.py
LINES: 1
SIZE: 0.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\sarif\version.py
ENCODING: utf-8
```py
from typing import Final
SARIF_VERSION: Final = "2.1.0"
SARIF_SCHEMA_LINK: Final = "https://docs.oasis-open.org/sarif/sarif/v2.1.0/cs01/schemas/sarif-schema-2.1.0.json"
# flake8: noqa
```
|
=======================================================================================================================================
SOURCE CODE FILE: utils.py
LINES: 1
SIZE: 2.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\diagnostics\infra\utils.py
ENCODING: utf-8
```py
from __future__ import annotations
import functools
import inspect
import traceback
from typing import Any, Callable, TYPE_CHECKING
from torch.onnx._internal.diagnostics.infra import _infra, formatter
if TYPE_CHECKING:
from collections.abc import Mapping, Sequence
def python_frame(frame: traceback.FrameSummary) -> _infra.StackFrame:
"""Returns a StackFrame for the given traceback.FrameSummary."""
snippet = frame.line
return _infra.StackFrame(
location=_infra.Location(
uri=frame.filename,
line=frame.lineno,
snippet=snippet,
function=frame.name,
message=snippet,
)
)
def python_call_stack(frames_to_skip: int = 0, frames_to_log: int = 16) -> _infra.Stack:
"""Returns the current Python call stack."""
if frames_to_skip < 0:
raise ValueError("frames_to_skip must be non-negative")
if frames_to_log < 0:
raise ValueError("frames_to_log must be non-negative")
frames_to_skip += 1 # Skip this function.
stack = _infra.Stack()
# Frames are returned in order of oldest to newest.
frames = traceback.extract_stack(limit=frames_to_skip + frames_to_log)
frames.reverse()
stack.frames = [python_frame(frame) for frame in frames[frames_to_skip:]]
stack.message = "Python call stack"
return stack
@functools.lru_cache
def _function_source_info(fn: Callable) -> tuple[Sequence[str], int, str | None]:
"""Returns the source lines, line number, and source file path for the given function.
Essentially, inspect.getsourcelines() and inspect.getsourcefile() combined.
Caching is applied to reduce the performance impact of this function.
"""
source_lines, lineno = inspect.getsourcelines(fn)
return source_lines, lineno, inspect.getsourcefile(fn)
def function_location(fn: Callable) -> _infra.Location:
"""Returns a Location for the given function."""
source_lines, lineno, uri = _function_source_info(fn)
snippet = source_lines[0].strip() if len(source_lines) > 0 else "<unknown>"
return _infra.Location(
uri=uri,
line=lineno,
snippet=snippet,
message=formatter.display_name(fn),
)
def function_state(
fn: Callable, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> Mapping[str, Any]:
bind = inspect.signature(fn).bind(*args, **kwargs)
return bind.arguments
```
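A minimal usage sketch (not part of the file above) exercising the two public helpers; run it from a file so `inspect` can locate the source:

```py
# Illustrative only: capture a short call stack and locate a function.
from torch.onnx._internal.diagnostics.infra import utils

def sample() -> int:
    return 42

stack = utils.python_call_stack(frames_to_log=4)
print(stack.message, len(stack.frames))  # "Python call stack" and up to 4 frames

loc = utils.function_location(sample)
print(loc.uri, loc.line, loc.snippet)  # file path, line number, "def sample() -> int:"
```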
|
=================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\__init__.py
ENCODING: utf-8
```py
```
|
==================================================================================================================================
SOURCE CODE FILE: _analysis.py
LINES: 10
SIZE: 8.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_analysis.py
ENCODING: utf-8
```py
"""Compatibility analyzer for PyTorch models."""
# mypy: allow-untyped-defs
# flake8: noqa: B950 We do not need flake8 as it complains about line length
from __future__ import annotations
import dataclasses
import textwrap
import traceback
from collections import defaultdict
from typing import TYPE_CHECKING
import torch
import torch._export.serde.schema
from torch.export import graph_signature
from torch.onnx._internal.exporter import _dispatching, _registration
if TYPE_CHECKING:
import torch.fx
@dataclasses.dataclass
class ModelInfo:
"""Information about the model."""
parameter_count: defaultdict[torch.dtype, int] = dataclasses.field(
default_factory=lambda: defaultdict(int)
)
buffer_count: defaultdict[torch.dtype, int] = dataclasses.field(
default_factory=lambda: defaultdict(int)
)
fx_node_count: int = 0
fx_node_op_count: defaultdict[str, int] = dataclasses.field(
default_factory=lambda: defaultdict(int)
)
fx_node_target_count: defaultdict[str, int] = dataclasses.field(
default_factory=lambda: defaultdict(int)
)
dispatch_failures: list[tuple[torch.fx.Node, str]] = dataclasses.field(
default_factory=list
)
inputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(
default_factory=dict
)
outputs: dict[str, torch._export.serde.schema.TensorMeta] = dataclasses.field(
default_factory=dict
)
def _count_weights(
exported_program: torch.export.ExportedProgram,
) -> tuple[defaultdict[torch.dtype, int], defaultdict[torch.dtype, int]]:
"""Count the size of the parameters in the exported program."""
parameter_count: defaultdict[torch.dtype, int] = defaultdict(int)
buffer_count: defaultdict[torch.dtype, int] = defaultdict(int)
for parameter in exported_program.parameters():
dtype = parameter.dtype
parameter_count[dtype] += parameter.numel()
for buffer in exported_program.buffers():
dtype = buffer.dtype
buffer_count[dtype] += buffer.numel()
return parameter_count, buffer_count
def _format_model_info(model_info: ModelInfo) -> str:
"""Format the information about the model."""
lines = [
textwrap.dedent(
f"""\
PyTorch ONNX Conversion Analysis
## Model Information
The model has {sum(model_info.parameter_count.values())} parameters and {sum(model_info.buffer_count.values())} buffers (non-trainable parameters).
Number of parameters per dtype:
```python
{model_info.parameter_count}
```
Number of buffers per dtype:
```python
{model_info.buffer_count}
```
"""
),
"Inputs:",
*[f"- `{name}`: `{meta}`" for name, meta in model_info.inputs.items()],
"",
"Outputs:",
*[f"- `{name}`: `{meta}`" for name, meta in model_info.outputs.items()],
"",
f"The FX graph has {model_info.fx_node_count} nodes in total. Number of FX nodes per op:",
]
for op, count in model_info.fx_node_op_count.items():
lines.append(f"- `{op}`: {count}")
lines.append("\n")
lines.append("Of the call_function nodes, the counts of operators used are:\n")
sorted_targets = sorted(
model_info.fx_node_target_count.items(), key=lambda x: x[1], reverse=True
)
for target, count in sorted_targets:
lines.append(f"- `{target}`: {count}")
lines.append("")
lines.append("## ONNX Conversion Information")
lines.append("")
if model_info.dispatch_failures:
lines.append(
"The model contains operators the dispatcher could not find registered ONNX decompositions for. "
"This may be due to missing implementations, decompositions not registered "
"correctly, or a bug in the dispatcher."
)
lines.append("")
lines.append("Errors grouped by operator:\n")
target_to_nodes = defaultdict(list)
for node, _ in model_info.dispatch_failures:
target_to_nodes[str(node.target)].append(node)
target_to_messages = {}
for node, message in model_info.dispatch_failures:
if str(node.target) not in target_to_messages:
target_to_messages[str(node.target)] = message
for target, nodes in sorted(
target_to_nodes.items(), key=lambda x: x[0], reverse=True
):
message = textwrap.indent(
f"{target_to_messages[target]}. Example node: `{nodes[0].format_node()}`. All nodes: `{nodes}`",
" ",
)
lines.append(f"- `{target}`: {message}")
else:
lines.append("All operators in the model have registered ONNX decompositions.")
return "\n".join(lines)
def _get_io_specs(exported_program: torch.export.ExportedProgram) -> tuple[dict, dict]:
"""Get the input and output specs of the exported program."""
nodes: dict[str, torch.fx.Node] = {
node.name: node for node in exported_program.graph.nodes
}
user_inputs = [
spec
for spec in exported_program.graph_signature.input_specs
if spec.kind == graph_signature.InputKind.USER_INPUT
]
user_outputs = [
spec
for spec in exported_program.graph_signature.output_specs
if spec.kind == graph_signature.OutputKind.USER_OUTPUT
]
inputs: dict[str, torch._export.serde.schema.TensorMeta] = {}
outputs: dict[str, torch._export.serde.schema.TensorMeta] = {}
for spec in user_inputs:
if isinstance(spec.arg, graph_signature.ConstantArgument):
continue
name = spec.arg.name
# FIXME: tensor_meta is None sometimes when the exported program still knows the shape/type
inputs[name] = nodes[name].meta["tensor_meta"]
for spec in user_outputs:
if isinstance(spec.arg, graph_signature.ConstantArgument):
continue
name = spec.arg.name
outputs[name] = nodes[name].meta["tensor_meta"]
return inputs, outputs
def _count_fx_targets(
exported_program: torch.export.ExportedProgram,
) -> defaultdict[str, int]:
"""Count the number of targets for each node in the exported program."""
fx_node_target_count: defaultdict[str, int] = defaultdict(int)
for node in exported_program.graph.nodes:
if node.op == "call_function":
fx_node_target_count[str(node.target)] += 1
return fx_node_target_count
def analyze(
exported_program: torch.export.ExportedProgram,
registry: _registration.ONNXRegistry | None = None,
file=None,
) -> None:
"""Analyze the compatibility of the exported program."""
# Get basic information about the model
model_info = ModelInfo()
model_info.parameter_count, model_info.buffer_count = _count_weights(
exported_program
)
model_info.fx_node_count = len(exported_program.graph.nodes)
model_info.fx_node_target_count = _count_fx_targets(exported_program)
inputs, outputs = _get_io_specs(exported_program)
model_info.inputs = inputs
model_info.outputs = outputs
if registry is None:
registry = _registration.ONNXRegistry.from_torchlib()
# Try to find ops for every node in the graph
for node in exported_program.graph.nodes:
model_info.fx_node_op_count[node.op] += 1
if node.op == "call_function":
try:
onnx_function, message = _dispatching.dispatch(node, registry)
except Exception as e:
message = "Critical Error in dispatcher:\n"
formatted_exception = "\n".join(
traceback.format_exception(type(e), e, e.__traceback__)
)
message += f"```pytb\n{formatted_exception}\n```\n"
onnx_function = None
if onnx_function is None:
model_info.dispatch_failures.append((node, message))
# Print the results
report = _format_model_info(model_info)
print(report, file=file, flush=True)
def compare_ops(
program_a: torch.export.ExportedProgram, program_b: torch.export.ExportedProgram
) -> tuple[set[str], set[str]]:
"""Compare and get unique ops in two exported programs.
Args:
program_a: The first exported program.
program_b: The second exported program.
Returns:
A tuple of two sets, where the first set contains the unique ops in the first program
and the second set contains the unique ops in the second program.
"""
program_a_ops = set(_count_fx_targets(program_a))
program_b_ops = set(_count_fx_targets(program_b))
return program_a_ops - program_b_ops, program_b_ops - program_a_ops
```
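A minimal usage sketch (the toy module is mine; `analyze` writes its report to stdout unless `file` is given):

```py
import io
import torch
from torch.onnx._internal.exporter import _analysis

class M(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x) + 1

ep = torch.export.export(M(), (torch.randn(2, 3),))
buffer = io.StringIO()
_analysis.analyze(ep, file=buffer)  # markdown-style compatibility report
print(buffer.getvalue())
# compare_ops returns the call_function targets unique to each program.
only_a, only_b = _analysis.compare_ops(ep, ep)  # (set(), set())
```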
|
==================================================================================================================================
SOURCE CODE FILE: _building.py
LINES: 1
SIZE: 30.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_building.py
ENCODING: utf-8
```py
"""NOTES:
We need a typing module that will handle Python-to-ONNX type promotion for us.
For example, if we have torch.ops.aten.add(Tensor, 1.0), we need to promote 1.0
to the same type as Tensor. The same thing needs to work for
torch.ops.aten.add(1.0, Tensor) as well, which means we need a mechanism that
handles promotion regardless of argument order.
"""
# mypy: allow-untyped-defs
# mypy: disable-error-code=union-attr
from __future__ import annotations
import copy
import inspect
import logging
from collections.abc import Iterable, Mapping, Sequence
from typing import Any, TYPE_CHECKING, Union
import onnxscript
from onnxscript import evaluator, ir
from onnxscript.ir import convenience as ir_convenience
import torch
from torch.onnx._internal.exporter import _errors, _schemas, _tensors
if TYPE_CHECKING:
import onnx
logger = logging.getLogger(__name__)
ValidAttributeType = Union[
ir.TensorProtocol, int, float, bool, str, Sequence[int], Sequence[float], None
]
AllowedArgType = Union[
ir.Value, Sequence[Union[ir.Value, ValidAttributeType]], ValidAttributeType
]
# Logic for adapting inputs from general Python or PyTorch inputs to ONNX ir.Value
def _construct_named_inputs_and_attrs(
signature: _schemas.OpSignature,
args: Sequence[AllowedArgType],
kwargs: Mapping[str, AllowedArgType],
) -> tuple[dict[str, AllowedArgType], dict[str, ValidAttributeType]]:
"""Construct two mappings: name to inputs and named to attributes based on the signature and args/kwargs.
This function uses the OpSignature to determine which argument in args and kwargs corresponds to
which parameter in the signature. ONNX node inputs are stored in named_inputs, and attributes are
stored in named_attrs. If an _optional input_ is not provided, it is filled with None.
Args:
signature: The OpSignature for the node.
args: The positional arguments for the node.
kwargs: The keyword arguments for the node.
Returns:
A tuple of two mappings: named_inputs and named_attrs.
Raises:
ValueError: If a required parameter is not provided.
"""
# 1. Construct the (named_inputs, named_attrs) mapping based on (args, kwargs) and the signature.
# a. Loop over all parameters in the signature and args together
# b. Depending on param.is_input, Record named_inputs[param.name] = arg or named_attrs[param.name] = arg
# c. Handle kwargs as well
# d. Fill in None if the input is not provided
named_inputs: dict[str, Any] = {}
named_attrs: dict[str, Any] = {}
reversed_args_stack = list(reversed(args))
for param in signature.params:
if isinstance(param, _schemas.Parameter):
# Handle inputs
if reversed_args_stack:
# First exhaust the positional arguments
if param.variadic:
# Handle variadic arguments
named_inputs[param.name] = tuple(args)
reversed_args_stack.clear()
else:
named_inputs[param.name] = reversed_args_stack.pop() # type: ignore[assignment]
elif param.name in kwargs:
named_inputs[param.name] = kwargs[param.name] # type: ignore[assignment]
elif param.required:
raise ValueError(
f"Required parameter '{param.name}' is not provided. "
f"Signature: {signature}. Args: {args}. Kwargs: {kwargs}."
)
else:
logger.debug(
"Optional parameter '%s' is not provided. Added as None. Signature: %s",
param.name,
signature,
)
named_inputs[param.name] = None # type: ignore[assignment]
else:
# Handle attributes
attribute: ValidAttributeType | ir.Attr
assert isinstance(param, _schemas.AttributeParameter), (
f"Expected AttributeParameter, got {type(param)}"
)
if reversed_args_stack:
# First exhaust the positional arguments
attribute = reversed_args_stack.pop() # type: ignore[assignment]
elif param.name in kwargs:
attribute = kwargs[param.name] # type: ignore[assignment]
elif param.default is not None:
attribute = param.default
else:
attribute = None
if attribute is None:
if param.required:
raise ValueError(
f"Required attribute '{param.name}' is not provided. "
f"Signature: {signature}. Args: {args}. Kwargs: {kwargs}."
)
else:
logger.debug(
"Optional attribute '%s' is None. Dropped. Signature: %s",
param.name,
signature,
)
continue
if isinstance(attribute, ir.Attr):
                # Turn the attribute from a default value into an actual parameter for the node
attr_copied = copy.copy(attribute)
# Make sure the name is the same as the parameter name and not the name of the default parameter
attr_copied.name = param.name
attribute = attr_copied
if isinstance(attribute, int) and param.type == ir.AttributeType.FLOAT:
# Convert the attribute to float if needed. This happens in PyTorch
# where an attribute marked as float can be passed as an int.
attribute = float(attribute)
named_attrs[param.name] = attribute
return named_inputs, named_attrs # type: ignore[return-value]
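# Illustrative sketch (hypothetical signature with inputs A, B and attribute
# alpha): calling with args=(a, b) and kwargs={"alpha": 2.0} yields
# named_inputs={"A": a, "B": b} and named_attrs={"alpha": 2.0}; an omitted
# optional input is recorded as None, while an omitted optional attribute
# with no default is dropped.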
def _resolve_parameter_dtypes(
signature: _schemas.OpSignature, named_inputs: Mapping[str, AllowedArgType]
) -> Mapping[_schemas.TypeConstraintParam, ir.TypeProtocol]:
"""Determine which parameter takes which type.
Handle non-tensor input corner cases and type promotion.
Requires:
        All ir.Value in named_inputs should have their type set. Their type should be
compatible with the type_constraint of the corresponding parameter in the signature.
Args:
signature: The OpSignature for the node.
named_inputs: The mapping of parameter names to their arguments.
Returns:
        A mapping of type constraint parameters to ir.TypeProtocol.
"""
# a. Create type_binding: dict[str, ir.TypeProtocol]
# b. Iterate over all named_inputs
# b0. Find the corresponding parameter in the signature
# b1. If the argument is a Python constant, skip.
# b2. If the argument is a ir.Value, Bind {constraint: arg.type}.
type_binding = {}
for name, arg in named_inputs.items():
param = signature.params_map[name]
assert isinstance(param, _schemas.Parameter), (
f"Expected Parameter, got {type(param)}"
)
if isinstance(arg, (int, float, bool, str, Sequence, torch.Tensor)):
# Skip the Python constants because we do not know what dtype they should take yet
continue
elif isinstance(arg, ir.Value):
if arg.type is None:
# Skip the ir.Value if the type is not set
continue
# NOTE: We assume arg.type is compatible with the type_constraint
assert arg.type is not None, f"Expected type to be set for {arg}"
# TODO(justinchuby): Implement type promotion logic here.
type_binding[param.type_constraint] = arg.type
return type_binding
def _determine_input_dtype(
param: _schemas.Parameter,
arg: AllowedArgType,
type_binding: Mapping[_schemas.TypeConstraintParam, ir.TypeProtocol],
) -> ir.DataType:
"""Determine the dtype of the input that is a mix of Python constants and ir.Value."""
if param.type_constraint in type_binding:
# A known dtype is available because it was resolved
return type_binding[param.type_constraint].dtype
if len(param.type_constraint.allowed_types) == 1:
# Only one type is allowed by the type constraint
return next(iter(param.type_constraint.allowed_types)).dtype
# No dtype information available. Infer from the Python constant or (in the Sequence case)
# from a mix of Python constants and ir.Value
if isinstance(arg, bool):
return ir.DataType.BOOL
if isinstance(arg, float):
return ir.DataType.FLOAT
if isinstance(arg, int):
return ir.DataType.INT64
if isinstance(arg, str):
return ir.DataType.STRING
if isinstance(arg, (ir.Tensor, ir.TensorProtocol)):
return arg.dtype
if isinstance(arg, complex):
return ir.DataType.FLOAT
if arg is None:
return ir.DataType.UNDEFINED
# Handle sequences
if isinstance(arg, (tuple, list)):
if len(arg) == 0:
            # Special case: treat an empty sequence as INT64, since such sequences are typically used for shapes
return ir.DataType.INT64
# Try to obtain the dtype from one of the values
for val in arg:
if isinstance(val, ir.Value) and val.dtype is not None:
return val.dtype
if any(isinstance(val, float) for val in arg):
# If any float is present, the dtype is float
return ir.DataType.FLOAT
elif any(isinstance(val, int) for val in arg):
# Otherwise if any int is present, the dtype is int
return ir.DataType.INT64
raise ValueError(
f"Could not determine the dtype for the input '{param.name}'. "
f"param={param}, arg={arg}, param_type_constraint={param.type_constraint}, "
f"type_binding={type_binding}"
)
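# Illustrative fallback mapping used above when no type binding is available:
#   True -> BOOL, 1.5 -> FLOAT, 3 -> INT64, "s" -> STRING, None -> UNDEFINED,
#   []   -> INT64 (empty sequences are assumed to be shapes),
#   [1, 2.0] -> FLOAT (any float in a sequence promotes it to float).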
def _allowed_types_are_sequence_types(allowed_types: Iterable[ir.TypeProtocol]) -> bool:
"""Check if all allowed types are Sequence types."""
return all(isinstance(t, ir.SequenceType) for t in allowed_types)
def _get_or_create_constant(
constant_farm: dict[
tuple[
bool | int | float | str | tuple[int] | tuple[float],
ir.DataType,
],
ir.Value,
],
arg: bool
| int
| float
| str
| tuple[int]
| tuple[float]
| tuple[bool]
| list[int]
| list[float]
| list[bool],
dtype: ir.DataType,
opset: onnxscript.values.Opset,
) -> ir.Value:
# float representation of complex numbers
if isinstance(arg, complex):
# Convert the complex number to a float
arg = (arg.real, arg.imag)
if isinstance(arg, list):
# Make the arg hashable
arg = tuple(arg) # type: ignore[assignment]
constant_value = constant_farm.get((arg, dtype)) # type: ignore[arg-type]
if constant_value is None:
constant_tensor = ir.tensor(value=arg, dtype=dtype) # type: ignore[arg-type]
constant_value = opset.Constant(value=constant_tensor)
constant_farm[(arg, dtype)] = constant_value # type: ignore[arg-type,index]
return constant_value
def _process_python_constants(
signature: _schemas.OpSignature,
named_inputs: dict[str, AllowedArgType],
type_binding: Mapping[_schemas.TypeConstraintParam, ir.TypeProtocol],
constant_farm: dict[
tuple[
bool | int | float | str | tuple[int] | tuple[float],
ir.DataType,
],
ir.Value,
],
opset: onnxscript.values.Opset,
) -> dict[str, ir.Value | None]:
"""Convert Python constants to Constant nodes and list to Sequence nodes based on the dtype information.
The added constants will be replacing values in named_inputs in place.
Args:
signature: The OpSignature for the node.
named_inputs: The mapping of parameter names to their arguments.
        type_binding: A mapping of type constraint parameters to ir.TypeProtocol.
constant_farm: A dictionary of {(py_value, ir.DataType): ir.Value} to store the deduplicated constants.
opset: The Opset to use for creating Constant nodes.
Returns:
A mapping of parameter names to Python constants converted to constant Nodes.
"""
# 3. Convert Python constants to Constant nodes based on the dtype information;
# construct sequences
# a. Iterate over all parameters in the signature the second time
# b. If the parameter is in to_resolve_type:
# - If param.constraint in type_binding,
# Get the constant from constant_farm (deduplicated);
# otherwise set named_inputs[param.name] = Constant(value, dtype=type_binding[param.constraint])
# - Otherwise, set named_inputs[param.name] = Constant(value)
for name, arg in named_inputs.items():
param = signature.params_map[name]
assert isinstance(param, _schemas.Parameter), (
f"Expected Parameter, got {type(param)}"
)
if isinstance(arg, ir.Value):
# TODO(justinchuby): Cast the ir.Value here if needed
continue
if (
isinstance(arg, Sequence)
and len(arg) > 0
and any(isinstance(val, ir.Value) for val in arg)
):
# Skip the sequence of ir.Value. This is a variadic input or a Sequence input
# It will be handled by _process_python_sequences
continue
if param.variadic:
# Handled by _process_python_sequences
continue
if _allowed_types_are_sequence_types(param.type_constraint.allowed_types):
# Handled by _process_python_sequences
continue
dtype = _determine_input_dtype(param, arg, type_binding)
if arg is None:
constant_value = None
elif isinstance(arg, (ir.Tensor, ir.TensorProtocol)):
constant_value = opset.Constant(value=arg)
else:
# Deduplicate the constants
constant_value = _get_or_create_constant(constant_farm, arg, dtype, opset) # type: ignore[arg-type]
named_inputs[param.name] = constant_value
return named_inputs # type: ignore[return-value]
def _reshape_to_1d_tensor(opset: onnxscript.values.Opset, arg: ir.Value) -> ir.Value:
"""Reshape the input to a 1D tensor."""
return opset.Reshape(
arg, opset.Constant(value=ir.tensor([-1], dtype=ir.DataType.INT64))
)
def _process_python_sequences(
signature: _schemas.OpSignature,
named_inputs: dict[str, AllowedArgType],
type_binding: Mapping[_schemas.TypeConstraintParam, ir.TypeProtocol],
constant_farm: dict[
tuple[
bool | int | float | str | ir.TensorProtocol | tuple[int] | tuple[float],
ir.DataType,
],
ir.Value,
],
opset: onnxscript.values.Opset,
):
"""Handle three types of sequences.
    1. Variadic inputs
    2. Sequence inputs of ir.Value
    3. Sequences of Python constants that contain ir.Value
"""
for name, arg in named_inputs.items():
param = signature.params_map[name]
assert isinstance(param, _schemas.Parameter), (
f"Expected Parameter, got {type(param)}"
)
if not isinstance(arg, (tuple, list)):
continue
if len(arg) == 0:
# Skip empty sequences
continue
# 1. Sequence input of ir.Value
if _allowed_types_are_sequence_types(param.type_constraint.allowed_types):
# Turn the list into a Sequence node
# Constant op creation will be handled by the variadic case below when calling
# the SequenceConstruct op.
named_inputs[name] = opset.SequenceConstruct(*arg)
continue
# 2. Variadic inputs
# NOTE: Variadic operators like Max can be called with mixed ir.Value and Python constants
# like `Max(0, ir.Value())`
# We need to convert the Python constants to Constant nodes
if param.variadic:
if all(isinstance(val, ir.Value) for val in arg):
# Skip the variadic input if all values are ir.Value
continue
dtype = _determine_input_dtype(param, arg, type_binding)
new_args = []
for val in arg:
if isinstance(val, ir.Value):
new_args.append(val)
else:
constant_tensor = ir.tensor(value=val, dtype=dtype) # type: ignore[arg-type]
constant_value = opset.Constant(value=constant_tensor)
new_args.append(constant_value)
named_inputs[name] = new_args
continue
else:
# 3. Concat the list as a single input
# E.g. [Value, 42] should be converted to op.Concat(Value, Constant(42))
# when the expected input type is INT64
# We assume this only happens for 0D cases
if all(isinstance(val, ir.Value) for val in arg):
expanded_args = [_reshape_to_1d_tensor(opset, val) for val in arg]
named_inputs[name] = opset.Concat(*expanded_args, axis=0)
continue
dtype = _determine_input_dtype(param, arg, type_binding)
new_args = []
for val in arg:
if isinstance(val, ir.Value):
new_args.append(_reshape_to_1d_tensor(opset, val))
elif val is None:
# Skip None values
continue
elif isinstance(val, (ir.Tensor, ir.TensorProtocol)):
new_args.append(
_reshape_to_1d_tensor(opset, opset.Constant(value=val))
)
else:
# Turn the Python constant into 1D tensor for the constant
assert isinstance(val, (bool, int, float)), (
f"Expected int or float, got {type(val)}"
)
new_args.append(
_get_or_create_constant(constant_farm, [val], dtype, opset) # type: ignore[arg-type]
)
named_inputs[name] = opset.Concat(*new_args, axis=0)
continue
return named_inputs
def _determine_output_number(
signature: _schemas.OpSignature, named_attrs: Mapping[str, ValidAttributeType]
) -> int:
"""Determine the number of outputs for the node with heuristics."""
if signature.domain == "":
if signature.name == "BatchNormalization":
if not named_attrs.get("training_mode", 0):
return 1
if signature.name == "Split":
num_outputs = named_attrs.get("num_outputs")
if num_outputs is not None and isinstance(num_outputs, int):
return num_outputs
else:
raise ValueError(
"Could not determine the number of outputs for Split. "
"num_outputs must be provided"
)
return len(signature.outputs)
def _construct_node(
signature: _schemas.OpSignature,
named_inputs: Mapping[str, ir.Value | None],
named_attrs: Mapping[str, ValidAttributeType],
opset: onnxscript.values.Opset,
num_outputs: int,
) -> ir.Node:
"""Construct the node with the inputs and attributes.
Variadic inputs are flattened.
Args:
signature: The OpSignature for the node.
named_inputs: The mapping of parameter names to their arguments. When we
do not have the schema of an operator, we do not know the names of
the inputs, in which case the names can be anything because they
are not used in this function. The data structure is passed in for
consistency with the other functions.
named_attrs: The mapping of attribute names to their values.
        opset: The opset used to construct the symbolic output tensors.
        num_outputs: The number of outputs for the node.
    """
inputs: list[ir.Value | None] = []
# Flatten variadic inputs
for value in named_inputs.values():
if isinstance(value, Sequence):
inputs.extend(value)
else:
inputs.append(value)
# If final inputs are None, strip them from the node inputs
for input in reversed(inputs):
if input is not None:
break
inputs.pop()
# Construct and filter out None attributes
attributes = [
attr
for attr in ir_convenience.convert_attributes(named_attrs)
if attr.value is not None
]
outputs = [_tensors.SymbolicTensor(opset) for _ in range(num_outputs)]
return ir.Node(
signature.domain,
signature.name,
inputs=inputs,
attributes=attributes,
outputs=outputs,
version=signature.opset_version,
)
class OpRecorder(evaluator.Evaluator):
"""An onnxscript Evaluator that captures the graph into ONNX IR."""
def __init__(
self, opset: onnxscript.values.Opset, constant_farm: dict[Any, ir.Value]
):
self.nodes: list[ir.Node] = []
self.opset = opset
self.functions: dict[
ir.OperatorIdentifier, onnxscript.OnnxFunction | ir.Function
] = {}
self.constant_farm = constant_farm
def _call_op(
self,
op_signature: _schemas.OpSignature,
named_inputs: dict[str, AllowedArgType],
named_attrs: dict[str, ValidAttributeType],
num_outputs: int,
) -> Sequence[_tensors.SymbolicTensor]:
"""Record nodes for the given opschema and arguments.
Args:
            op_signature: The OpSignature containing the node signature.
named_inputs: The mapping of parameter names to their arguments.
named_attrs: The mapping of attribute names to their values.
"""
type_binding = _resolve_parameter_dtypes(op_signature, named_inputs)
try:
converted_named_inputs = _process_python_constants(
op_signature, named_inputs, type_binding, self.constant_farm, self.opset
)
converted_named_inputs = _process_python_sequences(
op_signature,
converted_named_inputs, # type: ignore[arg-type]
type_binding,
self.constant_farm,
self.opset,
)
except Exception as e:
raise _errors.GraphConstructionError(
f"Error processing Python constants for operator '{op_signature.domain}::{op_signature.name}'. "
f"named_inputs={named_inputs}, named_attrs={named_attrs}, opset={self.opset}, op_signature={op_signature}."
) from e
try:
self.nodes.append(
node := _construct_node(
op_signature,
converted_named_inputs,
named_attrs,
self.opset,
num_outputs,
)
)
except Exception as e:
raise _errors.GraphConstructionError(
f"Error constructing node for operator '{op_signature.domain}::{op_signature.name}'. "
f"named_inputs={named_inputs}, converted_named_inputs={converted_named_inputs}, "
f"named_attrs={named_attrs}, opset={self.opset}, op_signature={op_signature}."
) from e
return node.outputs # type: ignore[return-value]
def eval(
self,
schema: onnx.defs.OpSchema,
args: Sequence[AllowedArgType], # type: ignore[override]
kwargs: Mapping[str, AllowedArgType],
) -> _tensors.SymbolicTensor | Sequence[_tensors.SymbolicTensor]:
try:
op_signature = _schemas.OpSignature.from_opschema(schema)
named_inputs, named_attrs = _construct_named_inputs_and_attrs(
op_signature, args, kwargs
)
# TODO(justinchuby): Handle cast
if schema.name == "CastLike":
assert len(named_inputs) == 2
# Skip CastLike if the input and output types are the same
src_input = named_inputs["input"]
target_type = named_inputs["target_type"]
if (
isinstance(src_input, ir.Value)
and isinstance(target_type, ir.Value)
and src_input.dtype is not None
and target_type.dtype is not None
):
# dtypes are available
if src_input.dtype == target_type.dtype:
# Same type. No cast needed
return src_input # type: ignore[return-value]
else:
# Create a Cast node
return self.opset.Cast(src_input, to=target_type.dtype) # type: ignore[union-attr,return-value]
num_outputs = _determine_output_number(op_signature, named_attrs)
outputs = self._call_op(
op_signature, named_inputs, named_attrs, num_outputs
)
if len(outputs) == 1:
return outputs[0]
return outputs
except Exception as e:
raise _errors.GraphConstructionError(
f"Error calling operator '{schema.name}' with args {args} and kwargs {kwargs}."
) from e
def eval_function( # type: ignore[override]
self,
function: onnxscript.OnnxFunction,
args: Sequence[AllowedArgType],
kwargs: Mapping[str, AllowedArgType],
) -> _tensors.SymbolicTensor | Sequence[_tensors.SymbolicTensor] | bool | int:
try:
# TODO(justinchuby): Remove this once IsScalar and Rank are removed
# Special cases for handling IsScalar and Rank
if function.name == "IsScalar":
if len(args) != 1:
raise TypeError(
f"Expected 1 positional argument for function '{function}', got {len(args)}."
)
if isinstance(args[0], _tensors.SymbolicTensor):
if args[0].rank is not None:
return args[0].rank == 0
else:
# Fall to call add_function_call
pass
elif isinstance(args[0], Sequence):
return False
else:
# Python constants are scalars
return True
if function.name == "Rank":
if len(args) != 1:
raise TypeError(
f"Expected 1 positional argument for function '{function}', got {len(args)}."
)
if isinstance(args[0], _tensors.SymbolicTensor):
if args[0].rank is not None:
return args[0].rank
else:
# Fall to call add_function_call
pass
elif isinstance(args[0], Sequence):
if all(isinstance(arg, (int, float)) for arg in args[0]):
return 1
else:
# Fall to call add_function_call
pass
else:
# Python constants are scalars
return 0
# NOTE: signature should be written to function in the registration process
if hasattr(function, "_pt_onnx_signature"):
op_signature = function._pt_onnx_signature # type: ignore[attr-defined]
else:
op_signature = _schemas.OpSignature.from_function(
function,
function.function_ir.domain,
function.name,
opset_version=function.opset.version,
)
function._pt_onnx_signature = op_signature # type: ignore[attr-defined]
named_inputs, named_attrs = _construct_named_inputs_and_attrs(
op_signature, args, kwargs
)
# TODO(after torchlib migration): Remove traceable function handling
# NOTE: We need to call traceable functions after the _construct_named_inputs_and_attrs
# call because it will filter out the unexpected kwargs for us.
if function.traceable:
# Trace the function call instead of adding the function as a node
# Turn the ir.Attr objects into Python constants first
named_attrs = {
name: attr.value if isinstance(attr, ir.Attr) else attr
for name, attr in named_attrs.items()
}
# Use the type binding to resolve the dtypes of the inputs, and
# convert Python constants to Constant nodes
type_binding = _resolve_parameter_dtypes(op_signature, named_inputs)
try:
# _process_python_sequences is not here because we want to preserve python list
# properties for the function call
converted_named_inputs = _process_python_constants(
op_signature,
named_inputs,
type_binding,
self.constant_farm,
self.opset,
)
except Exception as e:
raise _errors.GraphConstructionError(
f"Error processing Python constants for operator '{op_signature.domain}::{op_signature.name}'. "
f"named_inputs={named_inputs}, named_attrs={named_attrs}, opset={self.opset}, op_signature={op_signature}."
) from e
return function.function(**converted_named_inputs, **named_attrs)
outputs = self._call_op(
op_signature,
named_inputs,
named_attrs,
len(op_signature.outputs),
)
self.functions[(function.function_ir.domain, function.name, "")] = function
if len(outputs) == 1:
return outputs[0]
return outputs
except Exception as e:
try:
source_file = inspect.getsourcefile(function.function)
_, lineno = inspect.getsourcelines(function.function)
except Exception:
source_file = lineno = None
            raise _errors.GraphConstructionError(
                f"Error calling function '{function.name}' with args {args} and kwargs {kwargs}."
                + (
                    f" The function is defined at '{source_file}:{lineno}'."
                    if source_file
                    else ""
                )
            ) from e
```
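To make the input/attribute matching above concrete, here is a self-contained sketch of the core loop in `_construct_named_inputs_and_attrs`, using a simplified stand-in for the `_schemas` parameter types (the names here are illustrative, not the actual classes):

```py
from dataclasses import dataclass

@dataclass
class Param:
    name: str
    is_input: bool  # True for node inputs, False for attributes
    required: bool = True

def partition(params, args, kwargs):
    """Split args/kwargs into ONNX node inputs and attributes by position."""
    named_inputs, named_attrs = {}, {}
    stack = list(reversed(args))  # pop() then yields args left-to-right
    for p in params:
        target = named_inputs if p.is_input else named_attrs
        if stack:
            target[p.name] = stack.pop()  # positional args are exhausted first
        elif p.name in kwargs:
            target[p.name] = kwargs[p.name]
        elif p.required:
            raise ValueError(f"missing required parameter '{p.name}'")
        elif p.is_input:
            target[p.name] = None  # optional inputs are filled with None
    return named_inputs, named_attrs

params = [Param("A", True), Param("B", True), Param("axis", False)]
print(partition(params, ("x", "y"), {"axis": 0}))
# ({'A': 'x', 'B': 'y'}, {'axis': 0})
```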
|
============================================================================================================================================
SOURCE CODE FILE: _capture_strategies.py
LINES: 2
SIZE: 11.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_capture_strategies.py
ENCODING: utf-8
```py
"""Strategies for capturing ExportedPrograms."""
# mypy: allow-untyped-defs
from __future__ import annotations
import abc
import contextlib
import dataclasses
import datetime
import logging
import pathlib
from typing import Any, Callable, TYPE_CHECKING
import torch
from torch.utils import _pytree
if TYPE_CHECKING:
import os
logger = logging.getLogger(__name__)
def _verbose_printer(verbose: bool | None) -> Callable[..., None]:
"""Prints messages based on `verbose`."""
if verbose is False:
return lambda *_, **__: None
return lambda *args, **kwargs: print("[torch.onnx]", *args, **kwargs)
def _take_first_line(text: str) -> str:
"""Take the first line of a text."""
lines = text.split("\n", maxsplit=1)
first_line = lines[0]
if len(lines) > 1:
first_line += "[...]"
return first_line
@contextlib.contextmanager
def _patch_dynamo_unsupported_functions():
"""Patch PyTorch to bypass some functions torch.export.export does not support."""
# TODO: Remove the patches once dynamo supports these functions.
import torch.jit
# Replace torch.jit.isinstance with isinstance
jit_isinstance = torch.jit.isinstance
torch.jit.isinstance = isinstance
logger.info("Replaced torch.jit.isinstance with isinstance to allow dynamo tracing")
try:
yield
finally:
torch.jit.isinstance = jit_isinstance
@dataclasses.dataclass
class Result:
exported_program: torch.export.ExportedProgram | None
strategy: str
exception: Exception | None = None
@property
def success(self) -> bool:
return self.exported_program is not None
class CaptureStrategy(abc.ABC):
"""Strategy for capturing a module as ExportedProgram.
To use a strategy, create an instance and call it with the model, args, kwargs, and dynamic_shapes.
Example::
strategy = TorchExportStrategy(verbose=True)
result = strategy(model, args, kwargs, dynamic_shapes)
"""
def __init__(
self,
*,
verbose: bool = False,
dump: bool = False,
artifacts_dir: str | os.PathLike = ".",
timestamp: str | None = None,
):
"""Initialize the strategy.
Args:
verbose: Whether to print verbose messages.
            dump: Whether to dump the intermediate artifacts to a file.
            artifacts_dir: The directory to save the dumped artifacts to.
            timestamp: The timestamp used in dump file names. A new one is generated if not provided.
        """
self._verbose_print = _verbose_printer(verbose)
self._dump = dump
self._artifacts_dir = pathlib.Path(artifacts_dir)
self._timestamp = timestamp or datetime.datetime.now().strftime(
"%Y-%m-%d_%H-%M-%S-%f"
)
def __call__(
self,
model: torch.nn.Module | torch.jit.ScriptFunction,
args: tuple[Any, ...],
kwargs: dict[str, Any] | None,
dynamic_shapes,
) -> Result:
self._enter(model)
if kwargs is None:
kwargs = {}
try:
exported_program = self._capture(model, args, kwargs, dynamic_shapes)
except Exception as e:
self._failure(model, e)
return Result(
exported_program=None,
strategy=self.__class__.__name__,
exception=e,
)
self._success(model)
return Result(exported_program, strategy=self.__class__.__name__)
@abc.abstractmethod
def _capture(
self, model, args, kwargs, dynamic_shapes
) -> torch.export.ExportedProgram:
raise NotImplementedError
def _enter(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
return
def _success(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
return
def _failure(
self, model: torch.nn.Module | torch.jit.ScriptFunction, e: Exception
) -> None:
return
class TorchExportStrategy(CaptureStrategy):
def _capture(
self, model, args, kwargs, dynamic_shapes
) -> torch.export.ExportedProgram:
with _patch_dynamo_unsupported_functions():
try:
return torch.export.export(
model,
args,
kwargs=kwargs,
dynamic_shapes=dynamic_shapes,
strict=True,
)
except torch._dynamo.exc.UserError as exc:
# Refine the dynamic shapes based on the suggested fixes.
try:
new_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
exc.msg, dynamic_shapes
)
except Exception:
# If the dynamic shapes cannot be refined, re-raise the exception.
raise exc from None
return torch.export.export(
model, args, kwargs=kwargs, dynamic_shapes=new_shapes, strict=True
)
def _enter(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export`..."
)
def _success(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export`... ✅"
)
def _failure(self, model, e) -> None:
del e # Unused
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export`... ❌"
)
class TorchExportNonStrictStrategy(CaptureStrategy):
def _capture(
self, model, args, kwargs, dynamic_shapes
) -> torch.export.ExportedProgram:
try:
return torch.export.export(
model, args, kwargs=kwargs, dynamic_shapes=dynamic_shapes, strict=False
)
except torch._dynamo.exc.UserError as exc:
# Refine the dynamic shapes based on the suggested fixes.
try:
new_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
exc.msg, dynamic_shapes
)
except Exception:
# If the dynamic shapes cannot be refined, re-raise the exception.
raise exc from None
return torch.export.export(
model, args, kwargs=kwargs, dynamic_shapes=new_shapes, strict=False
)
def _enter(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`..."
)
def _success(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`... ✅"
)
def _failure(self, model, e) -> None:
del e # Unused
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=False)`... ❌"
)
class JitTraceConvertStrategy(CaptureStrategy):
def _capture(
self, model, args, kwargs, dynamic_shapes
) -> torch.export.ExportedProgram:
# Avoid circular import
from torch._export import converter as _torchscript_converter
flattened_args, spec = _pytree.tree_flatten((args, kwargs))
flattened_args = tuple(flattened_args)
# Since torch.jit.trace only accepts Tensors as inputs, we filter
# out non-Tensor arguments and reconstruct the arguments after entering
# the WrappedModel.
tensor_placeholder = object()
non_tensor_args = [
arg if not isinstance(arg, torch.Tensor) else tensor_placeholder
for arg in flattened_args
]
tensor_args = tuple(
arg for arg in flattened_args if isinstance(arg, torch.Tensor)
)
class WrappedModel(torch.nn.Module):
"""Wrap the model so that it takes flattened arguments."""
def __init__(self, m):
super().__init__()
self.model = m
def forward(self, *_args):
# Take the non-Tensor arguments list as a starting point and
# replace the tensor_placeholder with the actual tensor arguments
# from _args.
reconstructed_flattened_args = non_tensor_args.copy()
_args_iter = iter(_args)
for i, arg in enumerate(reconstructed_flattened_args):
if arg is tensor_placeholder:
reconstructed_flattened_args[i] = next(_args_iter)
# Unflatten the arguments and kwargs to pass to the model.
unflattened_args, unflattened_kwargs = _pytree.tree_unflatten(
reconstructed_flattened_args, spec
)
results = self.model(*unflattened_args, **unflattened_kwargs)
if not isinstance(results, tuple):
results = (results,)
flattened_results, _ = _pytree.tree_flatten(results)
if len(flattened_results) == 1:
return flattened_results[0]
return tuple(flattened_results)
jit_model = torch.jit.trace(
WrappedModel(model),
example_inputs=tensor_args,
check_trace=False,
strict=False,
)
if self._dump:
program_path = self._artifacts_dir / f"onnx_export_{self._timestamp}.pt"
try:
torch.jit.save(jit_model, program_path)
except Exception as e:
self._verbose_print(
f"Failed to save Torch Script model due to an error: {e}"
)
else:
self._verbose_print(
f"Torch Script model has been saved to '{program_path}'."
)
ep = _torchscript_converter.TS2EPConverter(jit_model, flattened_args).convert()
if dynamic_shapes is not None:
# Retrace with torch.export to get dynamic shapes
ep = torch.export.export(
ep.module(),
flattened_args,
dynamic_shapes=dynamic_shapes,
strict=False,
)
return ep
def _enter(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with Torch Script..."
)
def _success(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with Torch Script... ✅"
)
def _failure(self, model, e) -> None:
del e # Unused
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with Torch Script... ❌"
)
CAPTURE_STRATEGIES = (
TorchExportNonStrictStrategy, # strict=False is preferred over strict=True because it does not have dynamo issues
TorchExportStrategy,
JitTraceConvertStrategy,
)
```
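A hedged usage sketch of the strategy cascade (toy module of mine; this mirrors the `Example::` in the `CaptureStrategy` docstring and the preference order encoded in `CAPTURE_STRATEGIES`):

```py
import torch
from torch.onnx._internal.exporter._capture_strategies import CAPTURE_STRATEGIES

class M(torch.nn.Module):
    def forward(self, x):
        return x.relu()

model, args = M(), (torch.randn(2, 3),)
result = None
for strategy_cls in CAPTURE_STRATEGIES:
    # With verbose=True each attempt prints a "... ✅" or "... ❌" line.
    result = strategy_cls(verbose=True)(model, args, kwargs=None, dynamic_shapes=None)
    if result.success:
        break
assert result is not None and result.success, "all capture strategies failed"
exported_program = result.exported_program
```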
|
================================================================================================================================
SOURCE CODE FILE: _compat.py
LINES: 1
SIZE: 7.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_compat.py
ENCODING: utf-8
```py
"""Compatibility functions for the torch.onnx.export API."""
# mypy: allow-untyped-defs
# mypy: disable-error-code=attr-defined
from __future__ import annotations
import logging
import warnings
from collections.abc import Mapping, Sequence
from typing import Any, Callable, TYPE_CHECKING
import torch
from torch.onnx._internal._lazy_import import onnxscript_apis, onnxscript_ir as ir
from torch.onnx._internal.exporter import (
_constants,
_core,
_dynamic_shapes,
_onnx_program,
_registration,
)
if TYPE_CHECKING:
import os
logger = logging.getLogger(__name__)
def _get_torch_export_args(
args: tuple[Any, ...],
kwargs: dict[str, Any] | None,
) -> tuple[tuple[Any, ...], dict[str, Any] | None]:
"""Obtain the arguments for torch.onnx.export from the model and the input arguments."""
if not kwargs and args and isinstance(args[-1], dict):
kwargs = args[-1]
args = args[:-1]
return args, kwargs
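# Illustrative sketch: this mirrors the legacy torch.onnx.export convention in
# which a trailing dict in `args` carries the keyword arguments, e.g.
#   _get_torch_export_args((x, y, {"scale": 2.0}), None) -> ((x, y), {"scale": 2.0})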
def export_compat(
model: torch.nn.Module
| torch.export.ExportedProgram
| torch.jit.ScriptModule
| torch.jit.ScriptFunction,
args: tuple[Any, ...],
f: str | os.PathLike | None = None,
*,
kwargs: dict[str, Any] | None = None,
export_params: bool = True,
verbose: bool | None = None,
input_names: Sequence[str] | None = None,
output_names: Sequence[str] | None = None,
opset_version: int | None = None,
custom_translation_table: dict[Callable, Callable | Sequence[Callable]]
| None = None,
dynamic_axes: Mapping[str, Mapping[int, str]]
| Mapping[str, Sequence[int]]
| None = None,
dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None = None,
keep_initializers_as_inputs: bool = False,
external_data: bool = True,
report: bool = False,
optimize: bool = False,
verify: bool = False,
profile: bool = False,
dump_exported_program: bool = False,
artifacts_dir: str | os.PathLike = ".",
fallback: bool = False,
) -> _onnx_program.ONNXProgram:
if opset_version is None:
opset_version = _constants.TORCHLIB_OPSET
if isinstance(model, torch.export.ExportedProgram):
        # We know the model is already an exported program, so the args, kwargs, and dynamic_shapes
# are not used
dynamic_shapes = dynamic_shapes or {}
else:
args, kwargs = _get_torch_export_args(args, kwargs)
if dynamic_shapes is None and dynamic_axes is not None:
warnings.warn(
"# 'dynamic_axes' is not recommended when dynamo=True, "
"and may lead to 'torch._dynamo.exc.UserError: Constraints violated.' "
"Supply the 'dynamic_shapes' argument instead if export is unsuccessful.",
UserWarning,
stacklevel=3,
)
try:
dynamic_shapes, args, kwargs = (
_dynamic_shapes.from_dynamic_axes_to_dynamic_shapes(
model,
args,
kwargs,
dynamic_axes=dynamic_axes,
input_names=input_names,
output_names=set(output_names or ()),
)
)
except Exception as e:
raise RuntimeError(
"# Failed to convert 'dynamic_axes' to 'dynamic_shapes'. "
"Please provide 'dynamic_shapes' directly. "
"Refer to the documentation for 'torch.export.export' for more information on dynamic shapes."
) from e
dynamic_shapes_with_export_dim, need_axis_mapping = (
_dynamic_shapes.convert_str_to_export_dim(dynamic_shapes)
)
registry = _registration.ONNXRegistry.from_torchlib()
if custom_translation_table is not None:
for torch_op, onnx_ops in custom_translation_table.items():
# TODO(justinchuby): Support complex inputs with annotations
if not isinstance(onnx_ops, Sequence):
onnx_ops = (onnx_ops,)
for op in reversed(onnx_ops):
# register_op places the op in the front of all onnx variants,
# so we reverse the list to maintain the order of the custom ops provided
registry.register_op(torch_op, op, is_complex=False)
try:
onnx_program = _core.export(
model,
args,
kwargs,
registry=registry,
dynamic_shapes=dynamic_shapes_with_export_dim,
input_names=input_names,
output_names=output_names,
profile=profile,
report=report,
verify=verify,
dump_exported_program=dump_exported_program,
artifacts_dir=artifacts_dir,
verbose=verbose,
)
except Exception as e:
if fallback:
if verbose is not False:
print(
"[torch.onnx] Falling back to legacy torch.onnx.export due "
f"to the following error: {e}",
)
if f is None:
raise TypeError("f must be provided when fallback is enabled") from e
if dynamic_shapes is not None and dynamic_axes is None:
if input_names is None:
raise ValueError(
"Failed to convert dynamic_shapes to dynamic_axes. "
"Either input_names or dynamic_axes must be provided "
"when dynamic is requested in fallback"
) from e
dynamic_axes = _dynamic_shapes.from_dynamic_shapes_to_dynamic_axes(
dynamic_shapes=dynamic_shapes, input_names=input_names, exception=e
)
torch.onnx.utils.export(
model, # type: ignore[arg-type]
args,
f, # type: ignore[arg-type]
kwargs=kwargs,
export_params=export_params,
input_names=input_names,
output_names=output_names,
opset_version=17, # TODO(justinchuby): Hard coded to 17 for now
dynamic_axes=dynamic_axes,
keep_initializers_as_inputs=keep_initializers_as_inputs,
)
onnx_program = _onnx_program.ONNXProgram(ir.load(f), None)
            # NOTE: If it's falling back to the legacy exporter, we don't need to
# optimize the model, so we return it here. Users can still optimize
# the model using the optimize() if they want.
return onnx_program
else:
raise
if need_axis_mapping and dynamic_shapes is not None:
onnx_program._rename_dynamic_axes(dynamic_shapes)
    # Convert the opset version and optimize
onnx_program.model = onnxscript_apis.convert_version(
onnx_program.model, opset_version
)
if optimize:
onnx_program.optimize()
if f is not None:
onnx_program.save(
f,
include_initializers=export_params,
keep_initializers_as_inputs=keep_initializers_as_inputs,
external_data=external_data,
)
return onnx_program
```
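`export_compat` is an internal entry point; the expected way to reach it is through `torch.onnx.export(..., dynamo=True)`. A minimal sketch, assuming that public wrapper forwards these keyword arguments:

```py
import torch

class M(torch.nn.Module):
    def forward(self, x):
        return x * 2.0

onnx_program = torch.onnx.export(
    M(),
    (torch.randn(2, 3),),
    dynamo=True,
    # String dims are converted to torch.export Dim objects internally.
    dynamic_shapes={"x": {0: "batch"}},
)
onnx_program.save("model.onnx")
```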
|
===================================================================================================================================
SOURCE CODE FILE: _constants.py
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_constants.py
ENCODING: utf-8
```py
# ir_version used for the ONNX file. See https://github.com/onnx/onnx/blob/main/docs/IR.md#onnx-versioning
ONNX_IR_VERSION = 10
# The opset version torchlib is implemented with. Update this number when updating torchlib
TORCHLIB_OPSET = 18
TORCHLIB_DOMAIN = "pkg.torch.onnx"
# Domain used for functions translated from subgraphs
LOCAL_FUNCTION_DOMAIN = "pkg.torch.__subgraph__"
```
|
==============================================================================================================================
SOURCE CODE FILE: _core.py
LINES: 24
SIZE: 63.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_core.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# flake8: noqa: B950 We do not need flake8 as it complains line length
from __future__ import annotations
import ctypes
import datetime
import inspect
import itertools
import logging
import operator
import pathlib
import textwrap
import traceback
import typing
from collections.abc import Mapping, Sequence
from typing import Any, Callable, Literal
import onnxscript
import onnxscript.evaluator
from onnxscript import ir
from onnxscript.ir import convenience as ir_convenience
import torch
import torch.fx
from torch.export import graph_signature
from torch.onnx._internal._lazy_import import onnxscript_apis
from torch.onnx._internal.exporter import (
_analysis,
_building,
_capture_strategies,
_constants,
_dispatching,
_errors,
_fx_passes,
_ir_passes,
_onnx_program,
_registration,
_reporting,
_tensors,
_verification,
)
if typing.TYPE_CHECKING:
import os
import numpy.typing as npt
# Define utilities to convert PyTorch data types so users do not need to specify manually
_TORCH_DTYPE_TO_ONNX: dict[torch.dtype, ir.DataType] = {
torch.bfloat16: ir.DataType.BFLOAT16,
torch.bool: ir.DataType.BOOL,
torch.complex128: ir.DataType.COMPLEX128,
torch.complex64: ir.DataType.COMPLEX64,
torch.float16: ir.DataType.FLOAT16,
torch.float32: ir.DataType.FLOAT,
torch.float64: ir.DataType.DOUBLE,
torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN,
torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ,
torch.float8_e5m2: ir.DataType.FLOAT8E5M2,
torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ,
torch.int16: ir.DataType.INT16,
torch.int32: ir.DataType.INT32,
torch.int64: ir.DataType.INT64,
torch.int8: ir.DataType.INT8,
torch.uint8: ir.DataType.UINT8,
torch.uint16: ir.DataType.UINT16,
torch.uint32: ir.DataType.UINT32,
torch.uint64: ir.DataType.UINT64,
}
_BLUE = "\033[96m"
_END = "\033[0m"
_STEP_ONE_ERROR_MESSAGE = textwrap.dedent(
f"""\
Failed to export the model with torch.export. {_BLUE}This is step 1/3{_END} of exporting the model to ONNX. Next steps:
- Modify the model code for `torch.export.export` to succeed. Refer to https://pytorch.org/docs/stable/generated/exportdb/index.html for more information.
    - Debug `torch.export.export` and submit a PR to PyTorch.
- Create an issue in the PyTorch GitHub repository against the {_BLUE}*torch.export*{_END} component and attach the full error stack as well as reproduction scripts."""
)
_STEP_TWO_ERROR_MESSAGE = textwrap.dedent(
f"""\
Failed to decompose the FX graph for ONNX compatibility. {_BLUE}This is step 2/3{_END} of exporting the model to ONNX. Next steps:
- Create an issue in the PyTorch GitHub repository against the {_BLUE}*torch.export*{_END} component and attach the full error stack as well as reproduction scripts.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component. Attach the error report and the pt2 model."""
)
_STEP_THREE_ERROR_MESSAGE = textwrap.dedent(
f"""\
Failed to convert the exported program to an ONNX model. {_BLUE}This is step 3/3{_END} of exporting the model to ONNX. Next steps:
- If there is a missing ONNX function, implement it and register it to the registry.
    - If there is an internal error during ONNX conversion, debug the error and submit a PR to PyTorch.
- Create an error report with `torch.onnx.export(..., report=True)`, and save the ExportedProgram as a pt2 file. Create an issue in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component. Attach the error report and the pt2 model."""
)
logger = logging.getLogger(__name__)
# The current tracer that is being used to trace the operators,
# used by torch/onnx/_internal/exporter/_torchlib/ops/hop.py
current_tracer: _building.OpRecorder | None = None
def _torch_dtype_to_onnx_dtype(dtype: torch.dtype) -> ir.DataType:
return _TORCH_DTYPE_TO_ONNX[dtype]
class TorchTensor(ir.Tensor):
def __init__(self, tensor: torch.Tensor, name: str | None = None):
# Pass the tensor as the raw data to ir.Tensor's constructor
super().__init__(
tensor, dtype=_torch_dtype_to_onnx_dtype(tensor.dtype), name=name
)
def numpy(self) -> npt.NDArray:
self.raw: torch.Tensor
if self.dtype == ir.DataType.BFLOAT16:
return self.raw.view(torch.uint16).numpy(force=True)
if self.dtype in {
ir.DataType.FLOAT8E4M3FN,
ir.DataType.FLOAT8E4M3FNUZ,
ir.DataType.FLOAT8E5M2,
ir.DataType.FLOAT8E5M2FNUZ,
}:
# TODO: Use ml_dtypes
return self.raw.view(torch.uint8).numpy(force=True)
return self.raw.numpy(force=True)
def __array__(self, dtype: Any = None, copy: bool | None = None) -> npt.NDArray:
del copy # Unused, but needed for the signature
if dtype is None:
return self.numpy()
return self.numpy().__array__(dtype)
def tobytes(self) -> bytes:
        # Implement tobytes to support native PyTorch types so we can use types like bfloat16
# Reading from memory directly is also more efficient because
# it avoids copying to a NumPy array
import torch._subclasses.fake_tensor
with torch._subclasses.fake_tensor.unset_fake_temporarily():
# Disable any fake mode so calling detach() etc. will return a real tensor
tensor = self.raw.detach().cpu().contiguous()
if isinstance(tensor, torch._subclasses.fake_tensor.FakeTensor):
raise TypeError(
f"Cannot take content out from the FakeTensor ('{self.name}'). Please replace the tensor "
"with a tensor backed by real data using ONNXProgram.apply_weights() "
"or save the model without initializers by setting include_initializers=False."
)
return bytes(
(ctypes.c_ubyte * tensor.element_size() * tensor.numel()).from_address(
tensor.data_ptr()
)
)
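# Illustrative sketch: TorchTensor keeps the torch.Tensor as its raw data and
# converts lazily, e.g.
#   t = TorchTensor(torch.tensor([1.0, 2.0], dtype=torch.bfloat16), name="w")
#   t.dtype     # ir.DataType.BFLOAT16
#   t.numpy()   # a uint16 view, since NumPy has no native bfloat16 dtype
#   t.tobytes() # raw bytes read directly from the tensor's memory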
# https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L56C1-L62C19
# class InputKind(Enum):
# USER_INPUT = auto()
# PARAMETER = auto()
# BUFFER = auto()
# CONSTANT_TENSOR = auto()
# CUSTOM_OBJ = auto()
# TOKEN = auto()
# https://github.com/pytorch/pytorch/blob/ee6cb6daa173896f8ea1876266a19775aaa4f610/torch/export/graph_signature.py#L89C1-L96C19
# class OutputKind(Enum):
# USER_OUTPUT = auto()
# LOSS_OUTPUT = auto()
# BUFFER_MUTATION = auto()
# GRADIENT_TO_PARAMETER = auto()
# GRADIENT_TO_USER_INPUT = auto()
# USER_INPUT_MUTATION = auto()
# TOKEN = auto()
def _set_shape_types(
values: Sequence[ir.Value],
meta_vals: Sequence[torch.Tensor],
complex_to_float: bool = True,
) -> None:
if not isinstance(meta_vals, Sequence):
logger.warning(
"Expected meta_vals to be a sequence, but got %s. There may be an internal error.",
meta_vals,
)
meta_vals = (meta_vals,)
for value, meta_val in zip(values, meta_vals):
_set_shape_type(value, meta_val, complex_to_float=complex_to_float)
def _set_shape_type(
value: ir.Value,
meta_val: torch.Tensor
| torch.SymBool
| torch.SymInt
| torch.SymFloat
| tuple[torch.Tensor],
complex_to_float: bool,
) -> None:
# TODO: Consider using meta["tensor_meta"] for this? Would it be faster?
if isinstance(meta_val, tuple):
logger.warning("Setting shape and type of tensors is not supported yet")
if isinstance(meta_val, torch.Tensor):
# FIXME: Consider shape for complex values
dims = []
for dim in meta_val.shape:
if isinstance(dim, int):
dims.append(dim)
else:
dims.append(str(dim.node))
value.dtype = _torch_dtype_to_onnx_dtype(meta_val.dtype)
if complex_to_float:
if meta_val.dtype == torch.complex64:
value.dtype = ir.DataType.FLOAT
# Add 2 as the last dimension if the tensor is complex to hold the real/imag parts
dims.append(2)
elif meta_val.dtype == torch.complex128:
value.dtype = ir.DataType.DOUBLE
# Add 2 as the last dimension if the tensor is complex to hold the real/imag parts
dims.append(2)
value.shape = ir.Shape(dims)
elif isinstance(meta_val, (int, torch.SymInt)):
        # aten::sym_size output is an int, not a tensor, which stands
# for the size of one dim. We treat it as a scalar.
value.dtype = ir.DataType.INT64
value.shape = ir.Shape([])
elif isinstance(meta_val, (bool, torch.SymBool)):
value.dtype = ir.DataType.BOOL
value.shape = ir.Shape([])
elif isinstance(meta_val, (float, torch.SymFloat)):
value.dtype = ir.DataType.FLOAT
value.shape = ir.Shape([])
else:
pass
def _get_qualified_module_name(cls: Any) -> str:
if isinstance(cls, str):
return cls
module = cls.__module__
if module is None or module == str.__class__.__module__:
return cls.__name__
return module + "." + cls.__name__
def _get_node_namespace(node: torch.fx.Node) -> tuple[str, list[str], list[str]]:
"""Get the namespace and scope of the node.
Example::
{
'L__self__': ('', <class 'torchvision.models.resnet.ResNet'>),
'L__self___avgpool': ('avgpool', <class 'torch.nn.modules.pooling.AdaptiveAvgPool2d'>)
}
Will yield
namespace: ": torchvision.models.resnet.ResNet/avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d/node_name: node_target"
class_hierarchy: ["torchvision.models.resnet.ResNet", "torch.nn.modules.pooling.AdaptiveAvgPool2d", <node_target>]
name_scopes: ["", "avgpool", <node_name>]
Args:
node: The node to get the namespace and scope of.
Returns:
(namespace, class_hierarchy, name_scope)
"""
nn_module_stack = node.meta.get("nn_module_stack")
logger.debug("%s", nn_module_stack)
if nn_module_stack is None:
logger.warning(
"nn_module_stack not found for node '%s'. Skip adding metadata...",
node.name,
)
return f"{node.name}: {node.target}", [str(node.target)], [node.name]
namespaces = []
class_hierarchy = []
name_scopes = []
for name, nn_module in nn_module_stack.values():
name_scopes.append(name)
nn_module_name = _get_qualified_module_name(nn_module)
class_hierarchy.append(nn_module_name)
namespaces.append(f"{name}: {_get_qualified_module_name(nn_module)}")
namespaces.append(f"{node.name}: {node.target}")
class_hierarchy.append(str(node.target))
name_scopes.append(node.name)
return "/".join(namespaces), class_hierarchy, name_scopes
def _set_node_metadata(fx_node: torch.fx.Node, ir_node: ir.Node) -> None:
"""Adds namespace and other node metadata to the ONNX node."""
namespace, class_hierarchy, name_scopes = _get_node_namespace(fx_node)
ir_node.metadata_props["namespace"] = namespace
ir_node.metadata_props["pkg.torch.onnx.class_hierarchy"] = repr(class_hierarchy)
ir_node.metadata_props["pkg.torch.onnx.name_scopes"] = repr(name_scopes)
ir_node.metadata_props["pkg.torch.onnx.fx_node"] = str(fx_node.format_node())
ir_node.metadata_props["pkg.torch.onnx.stack_trace"] = fx_node.meta.get(
"stack_trace", ""
)
def _handle_getitem_node(
node: torch.fx.Node, node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]]
) -> ir.Value:
"""Handle a getitem node.
Add the input value it is getting to the mapping, then return the value.
There are two cases for this node:
1. The output is a Sequence (traced), we can simply get the value from the sequence
2. The output is produced by a SplitToSequence node, we need to get the value from the sequence value
This function only handles the first case
"""
assert len(node.all_input_nodes) == 1
source = node.all_input_nodes[0]
source_outputs = node_name_to_values[source.name]
assert isinstance(source_outputs, Sequence), (
f"Expected {source.name} to output sequence, got {node_name_to_values[source.name]}"
)
index = typing.cast(int, node.args[1])
value = source_outputs[index]
    # Save the getitem value to the values mapping in case
    # it is one of the graph outputs
node_name_to_values[node.name] = value
    # Rename the value to match the getitem node's name.
value.name = node.name
return value
def _handle_call_function_node(
graph_like: ir.Graph | ir.Function,
node: torch.fx.Node,
node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
) -> None:
"""Handle a call_function node.
Args:
        graph_like: The ONNX graph or function under construction.
node: The FX node to translate.
node_name_to_values: A mapping of FX node names to their produced ir.Value.
"""
if node.target == operator.getitem:
_handle_getitem_node(node, node_name_to_values)
# Add op to the graph
op = str(node.target)
fx_inputs, attributes, input_names, output_names = _get_inputs_and_attributes(node)
inputs: list[ir.Value | None] = []
for i, input_ in enumerate(fx_inputs):
if input_ is None:
inputs.append(None)
elif hasattr(input_, "name"):
if isinstance(input_, torch.fx.Node) and input_.target == operator.getitem:
actual_input = _handle_getitem_node(input_, node_name_to_values)
inputs.append(actual_input)
else:
value = node_name_to_values[input_.name]
assert not isinstance(value, Sequence)
inputs.append(value)
else:
attributes[f"arg_{i}"] = input_
outputs = [ir.Value(name=name) for name in output_names]
if len(outputs) > 1:
_set_shape_types(outputs, node.meta["val"], complex_to_float=False)
node_name_to_values[node.name] = outputs
else:
_set_shape_type(outputs[0], node.meta["val"], complex_to_float=False)
node_name_to_values[node.name] = outputs[0]
ir_node = ir.Node(
"pkg.torch.ops",
op,
inputs,
attributes=ir_convenience.convert_attributes(attributes),
outputs=outputs,
name=node.name,
)
ir_node.meta["node"] = node
ir_node.metadata_props["pkg.torch.onnx.input_names"] = repr(input_names)
# Record the nn.Module stack for the node
_set_node_metadata(node, ir_node)
graph_like.append(ir_node)
def _convert_fx_arg_to_onnx_arg(
arg,
node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
node_name_to_local_functions: dict[str, ir.Function],
) -> Any:
"""Convert an FX argument to an ONNX compatible argument.
This function
- Converts a torch dtype to an integer
- Converts a torch device/memory_format/layout to a string
- Converts a torch.fx.Node to an ir.Value
- Converts a sequence of torch.fx.Node to a sequence of ir.Value
- Converts a get_attr node to an ir.Function
"""
if arg is None:
# None arguments are not modified because when the arg is an ONNX input
# we need to preserve the None value; when the arg is an ONNX attribute,
# we want to drop the value.
# The actual dropping of a None attribute value is done by OpRecorder
return None
if hasattr(arg, "name"):
if isinstance(arg, torch.fx.Node) and arg.target == operator.getitem:
source = arg.all_input_nodes[0]
source_outputs = node_name_to_values[source.name]
if isinstance(source_outputs, Sequence):
# If the node is getting an input from another node, get the actual value the node is retrieving
return _handle_getitem_node(arg, node_name_to_values)
else:
# `source_outputs` is a sequence(tensor()) value and we need to
# use SequenceAt to get the value. This is handled by torchlib
pass
if isinstance(arg, torch.fx.Node) and arg.op == "get_attr":
return node_name_to_local_functions[arg.name]
# If the input is a node, get the value from the mapping
return node_name_to_values[arg.name]
if isinstance(arg, (list, tuple)):
return [
_convert_fx_arg_to_onnx_arg(
elem, node_name_to_values, node_name_to_local_functions
)
for elem in arg
]
if isinstance(arg, (torch.device, torch.memory_format, torch.layout)):
return str(arg)
if isinstance(arg, torch.dtype):
return _torch_dtype_to_onnx_dtype(arg)
# Maybe a Python value
return arg
def _get_onnxscript_opset(opset_version: int) -> onnxscript.values.Opset:
return onnxscript.values.Opset("", opset_version)
def _handle_call_function_node_with_lowering(
model: ir.Model,
node: torch.fx.Node,
node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
*,
graph_like: ir.Graph | ir.Function,
constant_farm: dict[Any, ir.Value],
registry: _registration.ONNXRegistry,
opset: onnxscript.values.Opset,
node_name_to_local_functions: dict[str, ir.Function],
) -> None:
"""Translate a call_function node to an ONNX node.
Args:
model: The ONNX model at construction.
node: The FX node to translate.
node_name_to_values: A mapping of FX node names to their produced ONNX ``Value``.
graph_like: The current ONNX graph at construction.
Must add nodes to this graph because it can be a subgraph that is currently being constructed.
constant_farm: A mapping of constant values to existing ONNX ``Value``s.
registry: The registry of all aten to ONNX decomposition functions.
opset: The ONNX Script opset object for constructing ONNX nodes.
node_name_to_local_functions: A mapping of subgraph names to the corresponding ONNX functions.
"""
if node.target == operator.getitem:
source = node.all_input_nodes[0]
source_outputs = node_name_to_values[source.name]
if isinstance(source_outputs, Sequence):
_handle_getitem_node(node, node_name_to_values)
return
else:
# `source_outputs` is a sequence(tensor()) value and we need to
# use SequenceAt to get the value. This is handled by torchlib
pass
# Find the matching ONNX overload for the node
# NOTE: Create different registries for different ONNX opset versions
# TODO: Log the message here to expose false positives
onnx_function, message = _dispatching.dispatch(node, registry)
if onnx_function is None:
# TODO(justinchuby): Fall back to ATen op or do something else?
raise _errors.DispatchError(
f"No ONNX function found for {node.target!r}. Failure message: {message}"
)
    # Map FX inputs to ONNX inputs and fill optional inputs
fx_args = node.args
fx_kwargs = node.kwargs
# Replace the input FX nodes with ONNX values
onnx_args = [
_convert_fx_arg_to_onnx_arg(
input_, node_name_to_values, node_name_to_local_functions
)
for input_ in fx_args
]
onnx_kwargs = {}
for key, value in fx_kwargs.items():
onnx_kwargs[key] = _convert_fx_arg_to_onnx_arg(
value, node_name_to_values, node_name_to_local_functions
)
if key == "dtype" and onnx_kwargs[key] is None:
# Set dtype to -1 if it is None
# TODO(justinchuby): Maybe keep it as None?
onnx_kwargs[key] = -1
with onnxscript.evaluator.default_as(
tracer := _building.OpRecorder(opset, constant_farm)
):
global current_tracer
current_tracer = tracer
try:
outputs = onnx_function(*onnx_args, **onnx_kwargs)
except Exception as e:
raise _errors.GraphConstructionError(
f"Error when calling function '{onnx_function}' with args '{onnx_args}' and kwargs '{onnx_kwargs}'"
) from e
finally:
current_tracer = None
# NOTE: Instead of using the output names from node.target._schema,
# we always use the index if there are more than one outputs so the
# names can be programmatically reconstructed. This is useful for
# comparing values from the ONNX graph with those from the FX graph.
#
# When there are multiple outputs, the output names will be
# node_name__0, node_name__1, etc.
if isinstance(outputs, Sequence):
_set_shape_types(outputs, node.meta["val"], complex_to_float=True)
node_name_to_values[node.name] = outputs
for i, output in enumerate(outputs):
output.name = f"{node.name}__{i}"
else:
_set_shape_type(outputs, node.meta["val"], complex_to_float=True)
node_name_to_values[node.name] = outputs
outputs.name = node.name
for ir_node in tracer.nodes:
ir_node.meta["node"] = node
# Record the nn.Module stack for the node
_set_node_metadata(node, ir_node)
# Add the traced nodes to the current graph
# Must add nodes to this graph, not model.graph, because it can be a subgraph that is currently being constructed
graph_like.extend(tracer.nodes)
# Add the defined functions to the model
for identifier, onnxscript_function in tracer.functions.items():
if identifier in model.functions:
continue
if isinstance(onnxscript_function, ir.Function):
ir_function = onnxscript_function
else:
# TODO: Get IR function directly when onnxscript is updated
proto = onnxscript_function.to_function_proto()
ir_function = ir.serde.deserialize_function(proto)
model.functions[identifier] = ir_function
# Opset imports are added to the model in the final add_opset_imports pass
def _handle_placeholder_node(
node: torch.fx.Node,
node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
*,
graph_like: ir.Graph | ir.Function,
lower: str,
opset: onnxscript.values.Opset,
) -> None:
# Placeholder nodes are user inputs
# We need to create a new tensor for each user input
# and add it to the graph's inputs
name = node.name
input_ = _tensors.SymbolicTensor(opset, name=name)
input_.meta["node"] = node
_set_shape_type(input_, node.meta["val"], complex_to_float=lower != "none")
node_name_to_values[name] = input_
    # The inputs should be added to the graph here
graph_like.inputs.append(input_)
def _handle_get_attr_node(
node: torch.fx.Node,
*,
owned_graphs: Mapping[str, ir.Function],
node_name_to_local_functions: dict[str, ir.Function],
) -> None:
"""Handle a get_attr node by assigning the corresponding ONNX function to the node name.
    An example ExportedProgram that uses get_attr nodes is:
ExportedProgram:
class GraphModule(torch.nn.Module):
def forward(self, arg0_1: "f32[5]"):
true_graph_0 = self.true_graph_0 # get_attr
false_graph_0 = self.false_graph_0 # get_attr
conditional = torch.ops.higher_order.cond(False, true_graph_0, false_graph_0, [arg0_1]); true_graph_0 = false_graph_0 = arg0_1 = None
getitem: "f32[5]" = conditional[0]; conditional = None
return (getitem,)
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[5]"):
cos: "f32[5]" = torch.ops.aten.cos.default(arg0_1); arg0_1 = None
return (cos,)
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[5]"):
sin: "f32[5]" = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
return (sin,)
Args:
node: The FX node to translate.
owned_graphs: A mapping of subgraph names to the corresponding ONNX functions.
node_name_to_local_functions: A mapping of local function names to their corresponding ONNX functions.
"""
if not isinstance(node.target, str):
logger.warning(
"Expected node.target for the node %s to be a string, but got '%s'. There may be an internal error.",
node,
type(node.target),
)
return
function = owned_graphs[node.target]
node_name_to_local_functions[node.name] = function
def _handle_output_node(
node: torch.fx.Node,
node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]],
graph_like: ir.Graph | ir.Function,
) -> None:
"""Handle an output node by adding the output to the graph's outputs.
Args:
node: The FX node to translate.
node_name_to_values: A mapping of FX node names to their produced ONNX ``Value``.
graph_like: The ONNX graph at construction.
"""
    # node.args[0] can be a tuple with more than one element. This happens when,
# for example, a subgraph has multiple outputs. We flatten them all as ONNX graph outputs
for output in node.args[0]: # type: ignore[index,union-attr]
output_value_name = output.name # type: ignore[union-attr]
assert isinstance(output_value_name, str), (
f"Bug: Expected {output_value_name!r} to be a string"
)
values = node_name_to_values[output_value_name]
if isinstance(values, Sequence):
graph_like.outputs.extend(values)
return
graph_like.outputs.append(values)
def _translate_fx_graph(
fx_graph: torch.fx.Graph,
model: ir.Model,
*,
graph_like: ir.Graph | ir.Function,
owned_graphs: Mapping[str, ir.Function],
lower: Literal["at_conversion", "none"],
registry: _registration.ONNXRegistry,
) -> dict[str, ir.Value | Sequence[ir.Value]]:
"""Translate a submodule to an ONNX function.
Any functions used by the traced functions will be added to the model.
Args:
        fx_graph: The FX graph to translate.
        model: The ONNX model at construction.
        graph_like: The ONNX graph or function under construction.
owned_graphs: The subgraphs owned by the current graph.
lower: The lowering strategy to use.
registry: The registry of all aten to ONNX decomposition functions.
Returns:
A mapping of FX node names to their produced ONNX ``Value``.
"""
node_name_to_values: dict[str, ir.Value | Sequence[ir.Value]] = {}
    # We need node_name_to_local_functions in addition to owned_graphs because
    # a get_attr node may assign a different name to the subgraph than the
    # GraphModule name. This is not typical but is valid Python.
node_name_to_local_functions: dict[str, ir.Function] = {}
constant_farm: dict[Any, ir.Value] = {}
opset = _get_onnxscript_opset(registry.opset_version)
for node in fx_graph.nodes:
logger.debug(
"%s", (node.name, node.args, node.target, node.op, node.type, node.kwargs)
)
try:
if node.op == "placeholder":
_handle_placeholder_node(
node,
node_name_to_values,
graph_like=graph_like,
lower=lower,
opset=opset,
)
elif node.op == "call_function":
if lower == "at_conversion":
_handle_call_function_node_with_lowering(
model,
node,
node_name_to_values,
graph_like=graph_like,
constant_farm=constant_farm,
registry=registry,
opset=opset,
node_name_to_local_functions=node_name_to_local_functions,
)
else:
# No lowering
_handle_call_function_node(graph_like, node, node_name_to_values)
elif node.op == "get_attr":
_handle_get_attr_node(
node,
owned_graphs=owned_graphs,
node_name_to_local_functions=node_name_to_local_functions,
)
elif node.op == "output":
_handle_output_node(
node,
node_name_to_values,
graph_like=graph_like,
)
except Exception as e:
raise _errors.ConversionError(
f"Error when translating node {node.format_node()}. See the stack trace for more information."
) from e
return node_name_to_values
def _get_inputs_and_attributes(
node: torch.fx.Node,
) -> tuple[list[torch.fx.Node | None], dict[str, Any], list[str], list[str]]:
"""Find and Fill in the not provided kwargs with default values.
Returns:
(inputs, attributes, input_names, output_names)
"""
if inspect.isbuiltin(node.target) or isinstance(node.target, str):
inputs = list(node.args)
return inputs, {}, [], [node.name] # type: ignore[return-value]
# The target should be an ATen operator now
assert hasattr(node.target, "_schema"), (
f"The target should be an ATen operator now, but node target {node.target} has no schema"
)
node_schema: torch.FunctionSchema = node.target._schema
    # This function assumes the order of arguments in the FX op is the
    # same as the order of arguments in the TorchScript op.
inputs: list[Any] = [] # type: ignore[no-redef]
input_names: list[str] = []
attributes: dict[str, Any] = {}
if inspect.isbuiltin(node.target):
inputs = list(node.args)
else:
for arg, schema_arg in zip(node.args, node_schema.arguments):
if arg is None or isinstance(arg, torch.fx.Node):
inputs.append(arg)
input_names.append(schema_arg.name)
elif isinstance(arg, Sequence) and all(
elem is None or isinstance(elem, torch.fx.Node) for elem in arg
):
inputs.extend(arg)
input_names.extend([schema_arg.name] * len(arg))
elif isinstance(arg, torch.device):
attributes[schema_arg.name] = str(arg)
elif isinstance(arg, torch.dtype):
attributes[schema_arg.name] = _torch_dtype_to_onnx_dtype(arg)
else:
attributes[schema_arg.name] = arg
for schema_arg in node_schema.arguments:
if schema_arg.name not in node.kwargs:
continue
kwarg = node.kwargs[schema_arg.name]
if schema_arg.name in {
"layout",
"device",
"requires_grad",
"memory_format",
"implicit",
} or isinstance(kwarg, torch.device):
attr = str(kwarg)
elif isinstance(kwarg, torch.dtype):
attr = _torch_dtype_to_onnx_dtype(kwarg) # type: ignore[assignment]
else:
attr = kwarg # type: ignore[assignment]
attributes[schema_arg.name] = attr
output_names = [f"{node.name}_{output.name}" for output in node_schema.returns]
return inputs, attributes, input_names, output_names # type: ignore[return-value]
def _maybe_start_profiler(should_profile: bool) -> Any:
if should_profile:
import pyinstrument # type: ignore[import-not-found]
profiler = pyinstrument.Profiler(async_mode="disabled")
profiler.start()
return profiler
return None
def _maybe_stop_profiler_and_get_result(profiler) -> str | None:
if profiler is None:
return None
profiler.stop()
return profiler.output_text(unicode=True)
def _format_exception(e: Exception) -> str:
"""Format the full traceback as Python would show it."""
return "\n".join(traceback.format_exception(type(e), e, e.__traceback__))
def _summarize_exception_stack(e: BaseException) -> str:
"""Format the exception stack by showing the text of each exception."""
causes = [e]
while e.__cause__ is not None:
causes.append(e.__cause__)
e = e.__cause__
return (
"\n\n## Exception summary\n\n"
+ "⬆️\n".join([f"{type(e)}: {e}\n" for e in reversed(causes)])
+ "\n(Refer to the full stack trace above for more information.)"
)
def _format_exceptions_for_all_strategies(
results: list[_capture_strategies.Result],
) -> str:
"""Format all the exceptions from the capture strategies."""
return "\n".join(
[
f"# ⚠️ Errors from strategy '{result.strategy}': -----------------------\n\n"
f"{_format_exception(result.exception)}\n"
for result in results
if result.exception is not None
]
)
def exported_program_to_ir(
exported_program: torch.export.ExportedProgram,
*,
registry: _registration.ONNXRegistry | None = None,
lower: Literal["at_conversion", "none"] = "at_conversion",
) -> ir.Model:
"""Convert an exported program to an ONNX IR model.
Reference:
- ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html
Args:
exported_program: The exported program to convert.
lower: Whether to lower the graph to core ONNX operators.
            at_conversion: Lower when translating the FX graph to ONNX IR.
none: Do not lower the graph.
        registry: The registry of all ONNX Script decompositions.
"""
if registry is None:
registry = _registration.ONNXRegistry.from_torchlib()
if lower != "none":
exported_program = _prepare_exported_program_for_export(
exported_program, registry=registry
)
return _exported_program_to_onnx_program(
exported_program, registry=registry, lower=lower
).model
def _prepare_exported_program_for_export(
exported_program: torch.export.ExportedProgram,
*,
registry: _registration.ONNXRegistry,
) -> torch.export.ExportedProgram:
"""Decompose and apply pre-export transformations to the exported program."""
# Decompose the graph given the implemented torch ops in ONNX
exported_program = _fx_passes.decompose_with_registry(exported_program, registry)
graph_module = exported_program.graph_module
# Include explicit type promotion nodes
_fx_passes.insert_type_promotion_nodes(graph_module)
graph_module = _fx_passes.remove_assertion_nodes(graph_module)
# Reassign the graph module to save some runtime.
exported_program._graph_module = graph_module
return exported_program
def _get_scope_name(scoped_name: str) -> tuple[str, str]:
"""Get the scope and name of a node.
Examples::
>>> _get_scope_name('')
('', '')
>>> _get_scope_name('true_graph')
('', 'true_graph')
>>> _get_scope_name('true_graph.false_graph')
('true_graph', 'false_graph')
>>> _get_scope_name('true_graph.false_graph.some_graph')
('true_graph.false_graph', 'some_graph')
Args:
scoped_name: The scoped name of the node.
Returns:
(scope, name)
"""
if "." in scoped_name:
scope, name = scoped_name.rsplit(".", 1)
else:
scope, name = "", scoped_name
return scope, name
def _exported_program_to_onnx_program(
exported_program: torch.export.ExportedProgram,
*,
registry: _registration.ONNXRegistry,
lower: Literal["at_conversion", "none"] = "at_conversion",
) -> _onnx_program.ONNXProgram:
"""Convert an exported program to an ONNX Program.
    The exported_program field in the returned ONNXProgram is the program
    after decompositions have been applied.
Reference:
- ExportedProgram spec: https://pytorch.org/docs/stable/export.ir_spec.html
Args:
exported_program: The exported program to convert. The exported program
should be the one that is after decompositions have been applied.
lower: Whether to lower the graph to core ONNX operators.
            at_conversion: Lower when translating the FX graph to ONNX IR.
none: Do not lower the graph.
        registry: The registry of all ONNX Script decompositions.
"""
model = ir.Model(
graph=ir.Graph(
[],
[],
nodes=[],
# Opset imports are added to the model in the final add_opset_imports pass
name="main_graph",
metadata_props={
"pkg.torch.export.ExportedProgram.graph_signature": str(
exported_program.graph_signature
),
"pkg.torch.export.ExportedProgram.range_constraints": str(
exported_program.range_constraints
),
},
),
ir_version=_constants.ONNX_IR_VERSION,
producer_name="pytorch",
producer_version=torch.__version__,
)
# A dictionary storing the translated subgraphs as ONNX functions made available to outer graphs
# {<subgraph_scope>: {<subgraph_name>: <IR function>}}
scoped_subgraphs: dict[str, dict[str, ir.Function]] = {}
values = None
# 1. Translate all nodes in all subgraphs and the main graph
# Create a dictionary of values for the main graph for step 2-3 to add inputs and outputs
module: torch.fx.GraphModule
# Reverse the order of the modules so that the innermost module is processed first
# and made available to the outer module
for name, module in reversed(
tuple(exported_program.graph_module.named_modules(remove_duplicate=False))
):
# Obtain the graphs (previously built) owned by the current module
owned_graphs = scoped_subgraphs.setdefault(name, {})
fx_graph = module.graph
graph_like: ir.Graph | ir.Function
if name == "":
# Root graph
graph_like = model.graph
else:
function_name = name.replace(".", "__")
# Inputs and outputs will be created within _translate_fx_graph
func = ir.Function(
domain=_constants.LOCAL_FUNCTION_DOMAIN,
name=function_name,
graph=ir.Graph((), (), nodes=()),
attributes=(),
)
# Make this function available to the outer graph
scope, subgraph_name = _get_scope_name(name)
scoped_subgraphs.setdefault(scope, {})[subgraph_name] = func
model.functions[func.identifier()] = func
graph_like = func
values = _translate_fx_graph(
fx_graph,
model,
graph_like=graph_like,
owned_graphs=owned_graphs,
lower=lower,
registry=registry,
)
assert name == "", "The last module processed should be the root module"
assert values is not None
# Clear the input/output of the main graph and add them back in step 2-3
# using the more accurate graph signature
model.graph.inputs.clear()
model.graph.outputs.clear()
# 2. Add user inputs and all parameters/buffers to the graph.
# Since the node names and the tensor names are different, we need to rename
# the nodes to match the tensor names later. For now we will just use the node names.
user_inputs = [
spec
for spec in exported_program.graph_signature.input_specs
if spec.kind == graph_signature.InputKind.USER_INPUT
]
non_user_inputs = [
spec
for spec in exported_program.graph_signature.input_specs
if spec.kind != graph_signature.InputKind.USER_INPUT
]
for spec in itertools.chain(user_inputs, non_user_inputs):
# Put the user inputs first and then the parameters/buffers
if isinstance(spec.arg, graph_signature.ConstantArgument):
logger.debug("Skipping constant argument %s", spec.arg)
continue
value_name = spec.arg.name
input_kind = spec.kind
persistent = spec.persistent
value = values[value_name]
assert not isinstance(value, Sequence), (
f"Input '{value_name}' should not be a sequence. This is unexpected."
)
value.metadata_props["pkg.torch.export.graph_signature.InputSpec.kind"] = (
input_kind.name
)
value.metadata_props[
"pkg.torch.export.graph_signature.InputSpec.persistent"
] = str(persistent)
if input_kind == graph_signature.InputKind.USER_INPUT:
# Add only user inputs to the graph
# Subsequent passes can decide if they want to add initializers as inputs
model.graph.inputs.append(value)
else:
model.graph.initializers[value_name] = value
# 3. Add user outputs to the graph and assign metadata to all outputs
user_outputs = [
spec
for spec in exported_program.graph_signature.output_specs
if spec.kind == graph_signature.OutputKind.USER_OUTPUT
]
non_user_outputs = [
spec
for spec in exported_program.graph_signature.output_specs
if spec.kind != graph_signature.OutputKind.USER_OUTPUT
]
for spec in itertools.chain(user_outputs, non_user_outputs):
if isinstance(spec.arg, graph_signature.ConstantArgument):
logger.warning("Skipping constant argument %s", spec.arg)
continue
value_name = spec.arg.name
output_kind = spec.kind
value = values[value_name]
if not isinstance(value, (ir.Value, Sequence)):
raise TypeError(
f"Output '{value_name}' should be an ir.Value. Actual type is '{type(value)}': {value!r}. "
"This may be due to an incorrect implementation of the ONNX function that produced this output."
)
# The output value may be a sequence, meaning the operator has multiple outputs
_values = (value,) if not isinstance(value, Sequence) else value
if len(_values) > 1:
logger.warning(
"Model output '%s' has multiple values: %s (output spec: %s). Please make sure this is expected.",
value_name,
_values,
spec,
)
for value in _values:
value.metadata_props["pkg.torch.export.graph_signature.OutputSpec.kind"] = (
output_kind.name
)
if output_kind == graph_signature.OutputKind.USER_OUTPUT:
model.graph.outputs.append(value)
# 4. Rename the initializers to match the tensor names
for name, param_name in itertools.chain(
exported_program.graph_signature.inputs_to_parameters.items(),
exported_program.graph_signature.inputs_to_buffers.items(),
exported_program.graph_signature.inputs_to_lifted_tensor_constants.items(),
):
initializer = model.graph.initializers.pop(name)
initializer.name = param_name
# Record the original name so users can search the metadata and correspond
# with the FX graph
initializer.metadata_props["pkg.torch.onnx.original_node_name"] = name
model.graph.initializers[param_name] = initializer
# 5. Add initializers to the graph
# ExportedProgram stores parameters and buffers in state_dict,
# but non_persistent_buffers and lifted_tensor_constants are not there
    # so we need to get them from the named_* APIs.
for name, torch_tensor in itertools.chain(
exported_program.named_parameters(),
exported_program.named_buffers(),
exported_program.constants.items(),
):
initializer = model.graph.initializers.get(name) # type: ignore[assignment]
if initializer is None:
logger.warning("Tensor '%s' is not one of the initializers", name)
continue
if not isinstance(torch_tensor, torch.Tensor):
raise NotImplementedError(
f"Tensor '{name}' should be a torch.Tensor. Actual type is '{type(torch_tensor)}': {torch_tensor!r}. "
"This is unexpected and not yet supported."
)
ir_tensor = TorchTensor(torch_tensor, name=name)
initializer.const_value = ir_tensor
_set_shape_type(
initializer,
torch_tensor,
complex_to_float=lower != "none",
)
# TODO: Decide if we should keep mutated buffers as inputs/outputs
# TODO(justinchuby): Remove the hack
_ir_passes.add_torchlib_common_imports(model)
# Collect and add opset imports to the model
_ir_passes.add_opset_imports(model)
return _onnx_program.ONNXProgram(model, exported_program)
def _verbose_printer(verbose: bool | None) -> Callable[..., None]:
"""Prints messages based on `verbose`."""
if verbose is False:
return lambda *_, **__: None
return lambda *args, **kwargs: print("[torch.onnx]", *args, **kwargs)
def export(
model: torch.nn.Module
| torch.export.ExportedProgram
| torch.fx.GraphModule
| torch.jit.ScriptModule
| torch.jit.ScriptFunction,
args: tuple[Any, ...] = (),
kwargs: dict[str, Any] | None = None,
*,
registry: _registration.ONNXRegistry | None = None,
dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None = None,
input_names: Sequence[str] | None = None,
output_names: Sequence[str] | None = None,
report: bool = False,
verify: bool = False,
profile: bool = False,
dump_exported_program: bool = False,
artifacts_dir: str | os.PathLike = ".",
verbose: bool | None = None,
) -> _onnx_program.ONNXProgram:
"""Export a PyTorch model to ONNXProgram.
Args:
model: The model to export. This can be a PyTorch nn.Module or an ExportedProgram.
args: The arguments to pass to the model.
kwargs: The keyword arguments to pass to the model.
registry: The registry of all ONNX decompositions.
dynamic_shapes: Dynamic shapes in the graph.
input_names: If provided, rename the inputs.
output_names: If provided, rename the outputs.
report: Whether to generate an error report if the export fails.
verify: Whether to verify the ONNX model after exporting.
profile: Whether to profile the export process. When report is True,
the profile result will be saved in the report. Otherwise, the profile
result will be printed.
dump_exported_program: Whether to save the exported program to a file.
artifacts_dir: The directory to save the exported program and error reports.
verbose: Whether to print verbose messages. If None (default), some messages will be printed.
Returns:
The ONNXProgram with the exported IR graph.
Raises:
TorchExportError: If the export process fails with torch.export.
ConversionError: If the ExportedProgram to ONNX translation fails.
"""
# Set up the error reporting facilities
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
profiler = _maybe_start_profiler(profile)
# Create the artifacts directory if it does not exist
artifacts_dir = pathlib.Path(artifacts_dir)
if report or profile or dump_exported_program:
artifacts_dir.mkdir(parents=True, exist_ok=True)
verbose_print = _verbose_printer(verbose)
export_status = _reporting.ExportStatus()
failed_results: list[_capture_strategies.Result] = []
program: torch.export.ExportedProgram | None = None
capture_strategy: str | None = None
# Step 1: Export the model with torch.export.export if the model is not already an ExportedProgram
if isinstance(model, torch.export.ExportedProgram):
        # We know the model is already an ExportedProgram, so the args, kwargs,
        # and dynamic_shapes are not used.
program = model
export_status.torch_export = True
else:
# Convert an nn.Module to an ExportedProgram
# Try everything 🐰 (all paths for getting an ExportedProgram)
        # When the input is a JIT module, the last strategy will succeed, so that case is handled.
result: _capture_strategies.Result | None = None
for strategy_class in _capture_strategies.CAPTURE_STRATEGIES:
strategy = strategy_class( # type: ignore[abstract]
verbose=verbose is not False, # Treat None as verbose
dump=dump_exported_program,
artifacts_dir=artifacts_dir,
timestamp=timestamp,
)
result = strategy(model, args, kwargs, dynamic_shapes=dynamic_shapes)
# Record the status
if strategy_class is _capture_strategies.TorchExportNonStrictStrategy:
export_status.torch_export_non_strict = result.success
elif strategy_class is _capture_strategies.TorchExportStrategy:
export_status.torch_export = result.success
elif strategy_class is _capture_strategies.JitTraceConvertStrategy:
export_status.torch_jit = result.success
if result.exported_program is not None:
program = result.exported_program
break
else:
failed_results.append(result)
assert result is not None
capture_strategy = result.strategy
if result.exported_program is None:
# If all strategies fail, produce an error report and raise the first error
profile_result = _maybe_stop_profiler_and_get_result(profiler)
if report:
report_path = artifacts_dir / _reporting.construct_report_file_name(
timestamp, export_status
)
try:
_reporting.create_torch_export_error_report(
report_path,
_format_exceptions_for_all_strategies(failed_results),
export_status=export_status,
profile_result=profile_result,
)
except Exception as e_report:
verbose_print(
f"Failed to save error report due to an error: {e_report}"
)
else:
report_path = None
first_error = failed_results[0].exception
assert first_error is not None
# NOTE: We only throw the torch.export (first) exception because we want to
        # focus on the torch.export.export error. Errors from other strategies like
        # torch.jit.trace are due to the fallback and can be confusing to users.
# We save all errors in the error report.
raise _errors.TorchExportError(
_STEP_ONE_ERROR_MESSAGE
+ (
f"\nError report has been saved to '{report_path}'."
if report
else ""
)
+ _summarize_exception_stack(first_error)
) from first_error
assert program is not None
if dump_exported_program:
verbose_print("Dumping ExportedProgram because `dump_exported_program=True`...")
program_path = artifacts_dir / f"onnx_export_{timestamp}.pt2"
try:
torch.export.save(program, program_path)
except Exception as e:
verbose_print(f"Failed to save ExportedProgram due to an error: {e}")
else:
verbose_print(f"ExportedProgram has been saved to '{program_path}'.")
# Step 2: Decompose the exported program and insert type promotion nodes
verbose_print("Run decomposition...")
try:
# Build the ONNX function registry
if registry is None:
registry = _registration.ONNXRegistry.from_torchlib()
# Process the exported program to run decompositions and type promotions etc.
decomposed_program = _prepare_exported_program_for_export(
program, registry=registry
)
except Exception as e:
export_status.decomposition = False
verbose_print("Run decomposition... ❌")
profile_result = _maybe_stop_profiler_and_get_result(profiler)
if report:
report_path = artifacts_dir / _reporting.construct_report_file_name(
timestamp, export_status
)
# Run the analysis to get the error report
try:
_reporting.create_onnx_export_report(
report_path,
f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}",
program,
export_status=export_status,
profile_result=profile_result,
registry=registry,
)
except Exception:
logger.exception("Failed to save report due to an error.")
else:
report_path = None
raise _errors.ConversionError(
_STEP_TWO_ERROR_MESSAGE
+ (f"\nError report has been saved to '{report_path}'." if report else "")
+ _summarize_exception_stack(e)
) from e
else:
export_status.decomposition = True
verbose_print("Run decomposition... ✅")
# Step 3: Translate the decomposed program to ONNX and produce ONNXProgram
verbose_print("Translate the graph into ONNX...")
if report or profile:
pre_decomp_unique_ops, post_decomp_unique_ops = _analysis.compare_ops(
program, decomposed_program
)
else:
pre_decomp_unique_ops = None
post_decomp_unique_ops = None
try:
# Convert the exported program to an ONNX model
onnx_program = _exported_program_to_onnx_program(
decomposed_program, registry=registry
)
# Record the strategy used for getting the exported program for unit test assertions
onnx_program._capture_strategy = capture_strategy
# Run the ONNX passes
if input_names:
_ir_passes.rename_inputs(onnx_program.model, input_names)
if output_names:
_ir_passes.rename_outputs(onnx_program.model, output_names)
export_status.onnx_translation = True
verbose_print("Translate the graph into ONNX... ✅")
except Exception as e:
export_status.onnx_translation = False
verbose_print("Translate the graph into ONNX... ❌")
profile_result = _maybe_stop_profiler_and_get_result(profiler)
if report:
report_path = artifacts_dir / _reporting.construct_report_file_name(
timestamp, export_status
)
try:
assert pre_decomp_unique_ops is not None
assert post_decomp_unique_ops is not None
# Run the analysis to get the error report
_reporting.create_onnx_export_report(
report_path,
f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}",
decomposed_program,
decomp_comparison=_reporting.format_decomp_comparison(
pre_decomp_unique_ops, post_decomp_unique_ops
),
export_status=export_status,
profile_result=profile_result,
registry=registry,
)
verbose_print(f"Export report has been saved to '{report_path}'.")
except Exception:
logger.exception("Failed to save report due to an error.")
else:
report_path = None
raise _errors.ConversionError(
_STEP_THREE_ERROR_MESSAGE
+ (f"\nError report has been saved to '{report_path}'." if report else "")
+ _summarize_exception_stack(e)
) from e
profile_result = _maybe_stop_profiler_and_get_result(profiler)
assert onnx_program.exported_program is not None
if not verify:
# Return if verification is not requested
if report:
try:
assert pre_decomp_unique_ops is not None
assert post_decomp_unique_ops is not None
report_path = artifacts_dir / _reporting.construct_report_file_name(
timestamp, export_status
)
_reporting.create_onnx_export_report(
report_path,
"No errors"
if not failed_results
else _format_exceptions_for_all_strategies(failed_results),
onnx_program.exported_program,
decomp_comparison=_reporting.format_decomp_comparison(
pre_decomp_unique_ops, post_decomp_unique_ops
),
export_status=export_status,
profile_result=profile_result,
model=onnx_program.model,
registry=registry,
)
verbose_print(f"Export report has been saved to '{report_path}'.")
except Exception:
logger.exception("Failed to save report due to an error.")
elif profile and profile_result is not None:
verbose_print("Profile result:")
verbose_print(profile_result)
return onnx_program
# Step 4: (verify=True) Check the ONNX model with ONNX checker
try:
verbose_print("Check the ONNX model...")
onnxscript_apis.check_model(onnx_program.model)
export_status.onnx_checker = True
verbose_print("Check the ONNX model... ✅")
except Exception as e:
export_status.onnx_checker = False
verbose_print("Check the ONNX model... ❌")
if report:
try:
assert pre_decomp_unique_ops is not None
assert post_decomp_unique_ops is not None
report_path = artifacts_dir / _reporting.construct_report_file_name(
timestamp, export_status
)
_reporting.create_onnx_export_report(
report_path,
f"{_format_exceptions_for_all_strategies(failed_results)}\n\n{_format_exception(e)}",
onnx_program.exported_program,
decomp_comparison=_reporting.format_decomp_comparison(
pre_decomp_unique_ops, post_decomp_unique_ops
),
export_status=export_status,
profile_result=profile_result,
model=onnx_program.model,
registry=registry,
)
verbose_print(f"Export report has been saved to '{report_path}'.")
except Exception:
logger.exception("Failed to save report due to an error.")
logger.warning(
"Conversion successful but the ONNX model fails ONNX checker. " # noqa: G004
"Please create an issue "
f"in the PyTorch GitHub repository against the {_BLUE}*onnx*{_END} component and "
"attach the full error stack as well as reproduction scripts. ",
exc_info=e,
)
return onnx_program
# Step 5: (verify=True) Execute the model with ONNX Runtime
try:
verbose_print("Execute the model with ONNX Runtime...")
verification_results = _verification.verify_onnx_program(onnx_program)
verbose_print("Execute the model with ONNX Runtime... ✅")
export_status.onnx_runtime = True
onnx_runtime_error_message = None
except Exception as e:
verbose_print("Execute the model with ONNX Runtime... ❌")
export_status.onnx_runtime = False
onnx_runtime_error_message = _format_exception(e)
verification_message = None
else:
# Step 6: (verify=True) Validate the output values
verbose_print("Verify output accuracy...")
export_status.output_accuracy = True
for verification_result in verification_results:
# TODO(justinchuby): The threshold is arbitrary right now
if verification_result.max_abs_diff >= 5e-3:
logger.warning(
"Output '%s' has a large absolute difference of %f. ",
verification_result.name,
verification_result.max_abs_diff,
)
export_status.output_accuracy = False
if verification_result.max_rel_diff >= 1e-1:
logger.warning(
"Output '%s' has a large relative difference of %f. ",
verification_result.name,
verification_result.max_rel_diff,
)
export_status.output_accuracy = False
if export_status.output_accuracy:
verbose_print("Verify output accuracy... ✅")
else:
verbose_print("Verify output accuracy... ❌")
verification_message = _reporting.format_verification_infos(
verification_results
)
if report:
try:
assert pre_decomp_unique_ops is not None
assert post_decomp_unique_ops is not None
traceback_lines = []
if failed_results:
traceback_lines.append(
_format_exceptions_for_all_strategies(failed_results)
)
if onnx_runtime_error_message:
traceback_lines.append("# ⚠️ ONNX Runtime error -----------------------")
traceback_lines.append(onnx_runtime_error_message)
if not traceback_lines:
traceback_lines.append("No errors")
report_path = artifacts_dir / _reporting.construct_report_file_name(
timestamp, export_status
)
_reporting.create_onnx_export_report(
report_path,
"\n\n".join(traceback_lines),
onnx_program.exported_program,
profile_result=profile_result,
export_status=export_status,
decomp_comparison=_reporting.format_decomp_comparison(
pre_decomp_unique_ops, post_decomp_unique_ops
),
model=onnx_program.model,
registry=registry,
verification_result=verification_message,
)
verbose_print(f"Export report has been saved to '{report_path}'.")
except Exception:
logger.exception("Failed to save report due to an error.")
# Release the inference session created during verification
onnx_program.release()
return onnx_program
```
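For orientation, here is a minimal, hypothetical driver for the two entry points defined above, `exported_program_to_ir` and `export`; the import path is inferred from this file's location and may differ between versions, and the model is an assumption for the example:

```py
import torch
from torch.onnx._internal.exporter import _core  # path assumed from this file

class Net(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x) + 1

# Capture the model with torch.export first...
ep = torch.export.export(Net(), (torch.randn(2, 3),))
# ...then translate the ExportedProgram directly to ONNX IR:
model_ir = _core.exported_program_to_ir(ep)

# Or run the full pipeline (capture fallbacks, decomposition, translation,
# optional checker/runtime verification) in one call:
onnx_program = _core.export(Net(), (torch.randn(2, 3),), verify=True)
```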
================================================================================================================================
SOURCE CODE FILE: _decomp.py
LINES: 1
SIZE: 3.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_decomp.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import itertools
from typing import Callable, TYPE_CHECKING
import torch
import torch._ops
if TYPE_CHECKING:
from torch.onnx._internal.exporter import _registration
def get_onnx_implemented_overloads(
registry: _registration.ONNXRegistry,
) -> list[torch._ops.OperatorBase]:
"""
    Creates a list of OperatorBase objects that represent ONNX-supported PyTorch operations.
Args:
registry: The ONNX registry for PyTorch.
Returns:
        A list of OperatorBase objects representing ONNX-supported PyTorch operations.
"""
registered_ops: list[torch._ops.OperatorBase] = []
for op_namespace in (torch.ops.aten, torch.ops.prims):
op_names = dir(op_namespace)
for op_name in op_names:
op_overload_packet = getattr(op_namespace, op_name)
if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket):
continue
for overload_name in op_overload_packet.overloads():
op_overload = getattr(op_overload_packet, overload_name)
if registry.is_registered(op_overload):
registered_ops.append(op_overload)
return registered_ops
def create_onnx_friendly_decomposition_table(
onnx_registered_ops: set[torch._ops.OperatorBase],
) -> dict[torch._ops.OperatorBase, Callable]:
"""
This function creates a dictionary of op overloads and their decomposition functions
for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function,
its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's
    built-in aten-to-aten decompositions.
Args:
onnx_registered_ops: All ops that have an ONNX decomposition implemented.
Returns:
Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding
decomposition functions.
"""
decomposition_table: dict[torch._ops.OperatorBase, Callable] = {}
for op_overload, decomp_fn in itertools.chain(
torch.export.default_decompositions().items(), # type: ignore[attr-defined]
torch._decomp.decomposition_table.items(), # type: ignore[attr-defined]
):
# Skip decomposition for op_overload as long as that op_overload has a corresponding ONNX
# symbolic function.
# NOTE: Do not skip torch._refs decomps. They are fine because otherwise the model is
        # not exportable anyway.
if op_overload in onnx_registered_ops:
continue
        # If it is a HOP (higher-order op), filter it out as well.
if not hasattr(op_overload, "_schema"):
continue
# NOTE: torch._decomp.decomposition_table covers more ops
# than torch.export.default_decompositions, but the latter is
# more critical to torch.onnx.export.
if op_overload in decomposition_table:
continue
decomposition_table[op_overload] = decomp_fn
return decomposition_table
```
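A short, hypothetical sketch of how the two helpers above are typically combined; `ONNXRegistry.from_torchlib()` mirrors its use in the exporter file earlier in this dump, and the model is an assumption for the example:

```py
import torch
from torch.onnx._internal.exporter import _decomp, _registration  # paths assumed

class Net(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.gelu(x)

registry = _registration.ONNXRegistry.from_torchlib()
# Ops with a registered ONNX implementation are kept as-is...
onnx_ops = _decomp.get_onnx_implemented_overloads(registry)
# ...everything else falls back to PyTorch's aten-to-aten decompositions.
table = _decomp.create_onnx_friendly_decomposition_table(set(onnx_ops))

ep = torch.export.export(Net(), (torch.randn(2, 3),))
decomposed = ep.run_decompositions(table)
```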
=====================================================================================================================================
SOURCE CODE FILE: _dispatching.py
LINES: 3
SIZE: 14.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_dispatching.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import logging
from collections.abc import Sequence
from typing import Any, Callable
from onnxscript import ir
import torch
import torch.fx
from torch.onnx._internal.exporter import _registration, _schemas
logger = logging.getLogger(__name__)
# Define utilities to convert PyTorch data types so users do not need to specify manually
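# NOTE (added): complex dtypes map to their real counterparts because complex
# tensors are represented as real tensors with a trailing real/imag dimension
# of size 2 (see _set_shape_type in the exporter core file above).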
_TORCH_DTYPE_TO_ONNX_COMPATIBLE: dict[torch.dtype, ir.DataType] = {
torch.bfloat16: ir.DataType.BFLOAT16,
torch.bool: ir.DataType.BOOL,
torch.complex128: ir.DataType.DOUBLE,
torch.complex64: ir.DataType.FLOAT,
torch.float16: ir.DataType.FLOAT16,
torch.float32: ir.DataType.FLOAT,
torch.float64: ir.DataType.DOUBLE,
torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN,
torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ,
torch.float8_e5m2: ir.DataType.FLOAT8E5M2,
torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ,
torch.int16: ir.DataType.INT16,
torch.int32: ir.DataType.INT32,
torch.int64: ir.DataType.INT64,
torch.int8: ir.DataType.INT8,
torch.uint8: ir.DataType.UINT8,
}
def _torch_dtype_to_onnx_compatible_dtype(dtype: torch.dtype) -> ir.DataType:
return _TORCH_DTYPE_TO_ONNX_COMPATIBLE[dtype]
def _attribute_type_compatible_with_arg(
attr: _schemas.AttributeParameter,
value: ir.Value | int | float | bool | Sequence[int] | Sequence[float] | None,
) -> bool:
"""Check if the attribute type is compatible with the argument."""
if isinstance(value, bool):
return attr.type is ir.AttributeType.INT
if isinstance(value, str):
return attr.type is ir.AttributeType.STRING
if isinstance(value, int):
return attr.type in {ir.AttributeType.INT, ir.AttributeType.FLOAT}
if isinstance(value, float):
return attr.type is ir.AttributeType.FLOAT
if isinstance(value, complex):
return False
if isinstance(value, Sequence):
if attr.type is ir.AttributeType.INTS:
return all(isinstance(i, int) for i in value)
if attr.type is ir.AttributeType.FLOATS:
return all(isinstance(i, (int, float)) for i in value)
if isinstance(value, torch.dtype):
return attr.type is ir.AttributeType.INT
if isinstance(value, (torch.device, torch.memory_format, torch.layout)):
return attr.type is ir.AttributeType.STRING
if value is None and not attr.required:
# An optional attribute is not supplied
return True
return False
def _param_type_compatible_with_arg(
param: _schemas.Parameter,
value: ir.TypeProtocol
| str
| int
| float
| complex
| Sequence[int]
| Sequence[float]
| None,
assigned_types: dict[str, ir.TypeProtocol],
) -> bool:
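    # Python literals (bool/int/float/complex/str and sequences) are matched
    # directly against the parameter's allowed ONNX tensor types below, while
    # ir.TypeProtocol values also bind the parameter's type variable in
    # `assigned_types` so inputs sharing a constraint resolve to one type.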
# Handle Python types first
if isinstance(value, bool): # noqa: SIM102
if param.type_constraint.allowed_types & {ir.TensorType(ir.DataType.BOOL)}:
return True
if isinstance(value, int) and param.type_constraint.allowed_types & {
ir.TensorType(ir.DataType.INT4),
ir.TensorType(ir.DataType.INT8),
ir.TensorType(ir.DataType.INT16),
ir.TensorType(ir.DataType.INT32),
ir.TensorType(ir.DataType.INT64),
# Int inputs can be casted to a float too
ir.TensorType(ir.DataType.FLOAT8E4M3FN),
ir.TensorType(ir.DataType.FLOAT8E4M3FNUZ),
ir.TensorType(ir.DataType.FLOAT8E5M2),
ir.TensorType(ir.DataType.FLOAT8E5M2FNUZ),
ir.TensorType(ir.DataType.FLOAT16),
ir.TensorType(ir.DataType.FLOAT),
ir.TensorType(ir.DataType.DOUBLE),
}:
return True
if isinstance(value, float) and param.type_constraint.allowed_types & {
ir.TensorType(ir.DataType.FLOAT8E4M3FN),
ir.TensorType(ir.DataType.FLOAT8E4M3FNUZ),
ir.TensorType(ir.DataType.FLOAT8E5M2),
ir.TensorType(ir.DataType.FLOAT8E5M2FNUZ),
ir.TensorType(ir.DataType.FLOAT16),
ir.TensorType(ir.DataType.FLOAT),
ir.TensorType(ir.DataType.DOUBLE),
}:
return True
if isinstance(value, complex) and param.type_constraint.allowed_types & {
ir.TensorType(ir.DataType.FLOAT),
ir.TensorType(ir.DataType.DOUBLE),
ir.TensorType(ir.DataType.COMPLEX64),
ir.TensorType(ir.DataType.COMPLEX128),
}:
return True
if isinstance(value, str): # noqa: SIM102
if param.type_constraint.allowed_types & {ir.TensorType(ir.DataType.STRING)}:
return True
if isinstance(value, (list, tuple)):
if param.type_constraint.allowed_types & {
ir.TensorType(ir.DataType.INT32),
ir.TensorType(ir.DataType.INT64),
ir.TensorType(ir.DataType.FLOAT),
ir.TensorType(ir.DataType.DOUBLE),
ir.SequenceType(ir.TensorType(ir.DataType.INT32)),
ir.SequenceType(ir.TensorType(ir.DataType.INT64)),
ir.SequenceType(ir.TensorType(ir.DataType.FLOAT)),
ir.SequenceType(ir.TensorType(ir.DataType.DOUBLE)),
} and all(isinstance(i, (int)) for i in value):
# We will just allow any fx node and trust that the overload handles it
return True
if param.type_constraint.allowed_types & {
ir.TensorType(ir.DataType.FLOAT),
ir.TensorType(ir.DataType.DOUBLE),
ir.SequenceType(ir.TensorType(ir.DataType.FLOAT)),
ir.SequenceType(ir.TensorType(ir.DataType.DOUBLE)),
} and all(isinstance(i, (int, float)) for i in value):
# We will just allow any fx node and trust that the overload handles it
return True
if value is None and not param.required:
# An optional parameter is not supplied
return True
if not isinstance(value, ir.TypeProtocol):
return False
# Then check tensor types
if param.type_constraint.name in assigned_types:
# If a typevar is already bound, check if the value has the same type
assigned_type = assigned_types[param.type_constraint.name]
return assigned_type == value
# If the typevar is not bound, bind it to the value type
if value in param.type_constraint.allowed_types:
# TODO: Maybe just check dtype? Being more strict here for now
assigned_types[param.type_constraint.name] = value
return True
return False
def _get_type_from_tensor(
tensor: torch.Tensor
| torch.SymBool
| torch.SymInt
| torch.SymFloat
| Sequence[torch.Tensor],
) -> ir.TypeProtocol:
if isinstance(tensor, torch.Tensor):
return ir.TensorType(_torch_dtype_to_onnx_compatible_dtype(tensor.dtype))
if isinstance(tensor, torch.SymBool):
return ir.TensorType(ir.DataType.BOOL)
if isinstance(tensor, torch.SymInt):
return ir.TensorType(ir.DataType.INT64)
if isinstance(tensor, torch.SymFloat):
return ir.TensorType(ir.DataType.FLOAT)
# Handle sequences
first_tensor = next((item for item in tensor if item is not None), None)
if first_tensor is None:
return ir.SequenceType(ir.TensorType(ir.DataType.UNDEFINED))
return ir.SequenceType(
ir.TensorType(_torch_dtype_to_onnx_compatible_dtype(first_tensor.dtype))
)
def _get_first_tensor_in_node_list(
nodes: Sequence[torch.fx.Node | Any],
) -> torch.Tensor | None:
for node in nodes:
if (
isinstance(node, torch.fx.Node)
and "val" in node.meta
and isinstance(node.meta["val"], torch.Tensor)
):
return node.meta["val"]
return None
def _get_named_fx_node_args(node: torch.fx.Node) -> dict[str, torch.fx.node.Argument]:
assert hasattr(node.target, "_schema")
torch_schema: torch.FunctionSchema = node.target._schema # type: ignore[union-attr]
node_args = {}
for arg, schema_arg in zip(node.args, torch_schema.arguments):
node_args[schema_arg.name] = arg
node_args.update(node.kwargs)
return node_args
def get_matching_overload(
node: torch.fx.Node,
overloads: Sequence[_registration.OnnxDecompMeta],
) -> tuple[Callable | None, str]:
"""Get the overload that matches the node's arguments.
Args:
node: The node to match.
overloads: The OnnxDecompMeta with overloads and their signatures to match against.
Returns:
A tuple containing the matched overload and a string describing the reason for failure or success.
"""
if not hasattr(node.target, "_schema"):
        # FIXME(justinchuby): When the target is a builtin, we should instead
        # match only the inputs positionally. Figure out how to do that; right
        # now we assume all inputs are named.
return overloads[
0
].onnx_function, "The node target does not have a schema. Return the first one."
named_args = _get_named_fx_node_args(node)
# FIXME: Handle when we don't know the names of the arguments
schema_args: dict[str, torch.Argument] = {
arg.name: arg
for arg in node.target._schema.arguments # type: ignore[union-attr]
}
failure_messages: list[str] = []
for overload in overloads:
assigned_types: dict[str, ir.TypeProtocol] = {}
fail_reason = ""
if overload.signature is None:
# When an overload does not have a signature, we assume it is a custom op and should be matched
return (
overload.onnx_function,
"The overload does not have a signature. Assuming it is a custom op and matching it.",
)
for param in overload.signature:
if param.name not in schema_args and param.required:
                # We don't need to handle variadic inputs as there are none.
# A required parameter is not supplied.
fail_reason = "Required parameter not supplied"
break
# Get the argument
if param.name in named_args:
# Provided in Node args
arg = named_args[param.name]
elif (
param.name in schema_args
and schema_args[param.name].has_default_value()
):
# Provided in schema args
arg = schema_args[param.name].default_value
elif param.has_default():
# Provided in the ONNX op definition
arg = param.default # type: ignore[assignment]
else:
fail_reason = "Parameter not provided"
break
if isinstance(param, _schemas.Parameter):
if isinstance(arg, torch.Tensor):
arg = _get_type_from_tensor(arg) # type: ignore[assignment]
if isinstance(arg, (list, tuple)) and any(
isinstance(t, torch.fx.Node) for t in arg
):
first_tensor = _get_first_tensor_in_node_list(arg) # type: ignore[arg-type]
assert first_tensor is not None
# FIXME: Handle symfloat here
arg = ir.SequenceType(_get_type_from_tensor(first_tensor)) # type: ignore[assignment]
elif isinstance(arg, torch.fx.Node):
meta_val = arg.meta["val"]
arg = _get_type_from_tensor(meta_val) # type: ignore[assignment]
# TODO: Handle None attributes
# FIXME: Handle symfloat etc.
# Handle tensors and Python values
if not _param_type_compatible_with_arg(param, arg, assigned_types): # type: ignore[arg-type]
fail_reason = (
f"Parameter type not compatible with argument: param=`{param}`, "
f"assigned_types=`{assigned_types}`, arg=`{arg}`"
)
break
elif isinstance(param, _schemas.AttributeParameter):
if not _attribute_type_compatible_with_arg(param, arg): # type: ignore[arg-type]
fail_reason = f"Attribute type not compatible with argument: param=`{param}`, arg=`{arg}`"
break
else:
raise TypeError(f"Unknown parameter type: {type(param)}")
if not fail_reason:
return overload.onnx_function, "Successfully matched overload"
else:
failure_messages.append(
f"- Failed to match overload `{overload}`: {fail_reason}"
)
return (
None,
f"All overloads did not match the node `{node.format_node()}`.\n"
+ "\n".join(failure_messages),
)
def _arg_has_complex_dtype(arg) -> bool:
"""Check if the node has complex dtype recursively."""
if (
isinstance(arg, torch.fx.Node)
and "val" in arg.meta
and isinstance(arg.meta["val"], torch.Tensor)
and torch.is_complex(arg.meta["val"])
):
return True
elif isinstance(arg, list):
return any(_arg_has_complex_dtype(item) for item in arg)
return False
def dispatch(
node: torch.fx.Node, registry: _registration.ONNXRegistry
) -> tuple[Callable | None, str]:
"""Dispatch a node to an ONNX function based on the node's target and the ONNX registry.
Args:
node: The node to dispatch.
registry: The ONNX registry to use for dispatching.
Returns:
A tuple containing the matched ONNX function and a string describing the reason for failure or success.
"""
# TODO: Handle when node does not have a target
decomp_metas = registry.get_decomps(node.target) # type: ignore[arg-type]
# Determine if the node has complex inputs.
is_complex = any(_arg_has_complex_dtype(arg) for arg in node.args) or any(
_arg_has_complex_dtype(arg) for arg in node.kwargs.values()
)
if is_complex:
decomp_metas = [decomp for decomp in decomp_metas if decomp.is_complex]
if not decomp_metas:
return None, "No decompositions registered for the complex-valued input"
else:
decomp_metas = [decomp for decomp in decomp_metas if not decomp.is_complex]
if not decomp_metas:
return None, "No decompositions registered for the real-valued input"
if len(decomp_metas) == 1:
return (
decomp_metas[0].onnx_function,
"Fast path: Only one decomposition is defined",
)
overload, message = get_matching_overload(node, decomp_metas)
return overload, message
```
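For orientation, a minimal sketch of how `dispatch` is typically driven: walk the `call_function` nodes of an exported graph and ask the registry for a matching overload. The module name `_dispatching` is an assumption inferred from this file's contents; it is internal API.

```py
import torch
from torch.onnx._internal.exporter import _dispatching, _registration

class Model(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x + 1.0)

exported = torch.export.export(Model(), (torch.randn(2, 3),))
registry = _registration.ONNXRegistry.from_torchlib()
for node in exported.graph.nodes:
    if node.op != "call_function":
        continue  # only call_function nodes are dispatched to ONNX functions
    onnx_function, message = _dispatching.dispatch(node, registry)
    print(node.target, "->", onnx_function, "|", message)
```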
|
========================================================================================================================================
SOURCE CODE FILE: _dynamic_shapes.py
LINES: 1
SIZE: 14.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_dynamic_shapes.py
ENCODING: utf-8
```py
"""Compatibility functions for the torch.onnx.export API."""
# mypy: allow-untyped-defs
from __future__ import annotations
import inspect
import warnings
from typing import Any, TYPE_CHECKING
import torch
from torch.export.dynamic_shapes import _Dim, _DimHint
from torch.onnx._internal._lazy_import import onnxscript_ir as ir
from torch.utils import _pytree
if TYPE_CHECKING:
from collections.abc import Sequence
def from_dynamic_axes_to_dynamic_shapes(
model,
args: tuple[Any, ...],
kwargs: dict[str, Any] | None,
*,
dynamic_axes=None,
output_names: set[str],
input_names: Sequence[str] | None = None,
) -> tuple[dict[str, Any | None] | None, tuple[Any, ...], dict[str, Any] | None]:
"""
    Converts dynamic_axes into dynamic_shapes by replacing the axis names with ``torch.export.Dim.AUTO``.
dynamic_axes examples:
(1) dynamic_axes = {"x": {0: "my_custom_axis_name_1"}, "y": {1: "my_custom_axis_name_2"}}
(2) dynamic_axes = {"x": [0], "y": [1]}
these will be converted to dynamic_shapes respectively:
(1) dynamic_shapes = {"x": {0: Dim.AUTO}, "y": {1: Dim.AUTO}}
(2) dynamic_shapes = {"x": {0: Dim.AUTO}, "y": {1: Dim.AUTO}}
Detail on Dim.AUTO: `#133620 <https://github.com/pytorch/pytorch/pull/133620>`_
"""
# https://github.com/pytorch/pytorch/pull/128371
# 1. The function does not need to provide dynamic_shapes to torch.export.export
if dynamic_axes is None:
return None, args, kwargs
if input_names is None:
input_names = []
if kwargs is None:
kwargs = {}
dynamic_shapes: dict[str, Any | None] = {}
for input_name, axes in dynamic_axes.items():
# NOTE: torch.export.Dim.AUTO does its best to infer the min and max values
# from the model, but it's not guaranteed to be dynamic.
if input_name in output_names:
# output names are not needed for dynamic_shapes
continue
if isinstance(axes, dict):
if any(not isinstance(k, int) for k in axes.keys()):
raise ValueError(
"The axis in dynamic_axes must be in the form of: dict[int, str] or list[int]."
)
dynamic_shapes[input_name] = {
k: torch.export.Dim.AUTO # type: ignore[attr-defined]
for k, _ in axes.items()
}
elif isinstance(axes, list):
if any(not isinstance(k, int) for k in axes):
raise ValueError(
"The axis in dynamic_axes must be in the form of: dict[int, str] or list[int]."
)
dynamic_shapes[input_name] = {k: torch.export.Dim.AUTO for k in axes} # type: ignore[attr-defined]
elif axes is None:
dynamic_shapes[input_name] = None
else:
raise ValueError(
"Unsupported dynamic_axes format. Please provide a dict or a list."
)
for input_name in input_names:
if input_name not in dynamic_shapes:
dynamic_shapes[input_name] = None
# Order the inputs according to the signature of the model
sig = _signature(model)
inputs = []
for idx, param_name in enumerate(sig.parameters):
if idx < len(args):
inputs.append(args[idx])
elif param_name in kwargs:
inputs.append(kwargs[param_name])
# We need tree structure to represent dynamic_shapes
dynamic_shapes = _unflatten_dynamic_shapes_with_inputs_tree(inputs, dynamic_shapes)
# Since the dynamic_shapes are now in the order of the model parameters,
# we need to convert args and kwargs to the order of the model parameters.
return dynamic_shapes, tuple(inputs), {}
def from_dynamic_shapes_to_dynamic_axes(
dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any],
input_names: Sequence[str],
exception: Exception,
) -> dict[str, Any] | None:
"""
Converts dynamic_shapes into dynamic_axes by removing torch.export.Dim wrapping
and converting to list or dict form based on whether dimension names are present.
dynamic_shapes examples:
(1) dynamic_shapes = {"x": {0: Dim("my_custom_axis_name_1")}, "y": {1: Dim("my_custom_axis_name_2")}}
    (2) dynamic_shapes = ({0: Dim("my_custom_axis_name_1")}, {1: Dim("my_custom_axis_name_2")})
these will be converted to dynamic_axes respectively:
(1) dynamic_axes = {"x": [0], "y": [1]}
(2) dynamic_axes = {"x": [0], "y": [1]}
    NOTE: If the model input is nested, so is dynamic_shapes; we need to flatten dynamic_shapes
    and then assign the axes to the input names in the order they are provided.
    NOTE: input_names are used to assign the axes to the correct input names. If the input names
    are not provided, or there are fewer of them than the dynamic inputs/axes, an error is raised.
"""
flat_dynamic_shapes, _ = _flatten_dynamic_shapes_to_axes(dynamic_shapes)
if len(input_names) < len(flat_dynamic_shapes):
raise ValueError(
"To construct dynamic_axes from dynamic_shapes, "
f"number of input names ({len(input_names)}) should be greater than or equal to "
f"the number of graph inputs(flat) ({len(flat_dynamic_shapes)})"
) from exception
dynamic_axes: dict[str, list[int]] = {}
# input names are assigned in order
for input_name, axes in zip(input_names, flat_dynamic_shapes):
if axes is None:
continue
converted_axes: list[int] = []
if isinstance(axes, dict):
for axis, dim in axes.items():
if dim is None:
continue
converted_axes.append(axis)
dynamic_axes[input_name] = converted_axes
elif isinstance(axes, (list, tuple)):
for idx, dim in enumerate(axes):
if dim is None:
continue
converted_axes.append(idx)
dynamic_axes[input_name] = converted_axes
return dynamic_axes
def _any_str_or_dim_in_dynamic_shapes(
dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any],
) -> bool:
"""Check if there is any string or _Dim in the dynamic_shapes."""
flat_dynamic_shapes, _ = _flatten_dynamic_shapes_to_axes(dynamic_shapes)
    # This indicates that dynamic_shapes includes something we don't support as axes, and it was
    # flattened to itself. Otherwise, flat_dynamic_shapes should be a list of dict/list/tuple (or None).
if any(
not isinstance(axes, (dict, list, tuple)) and axes is not None
for axes in flat_dynamic_shapes
):
return False
# both str and _Dim can provide custom names
for axes in flat_dynamic_shapes:
if isinstance(axes, dict):
for dim in axes.values():
if isinstance(dim, (str, _Dim)):
return True
elif isinstance(axes, (list, tuple)):
for dim in axes:
if isinstance(dim, (str, _Dim)):
return True
return False
def convert_str_to_export_dim(
dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any] | None,
) -> tuple[dict[str, Any] | tuple[Any, ...] | list[Any] | None, bool]:
# 1. If there is no string in dynamic_shapes, we do not touch dynamic_shapes
if dynamic_shapes is None or not _any_str_or_dim_in_dynamic_shapes(dynamic_shapes):
return dynamic_shapes, False
# 2. Convert "name" to Dim.AUTO with flattening and identify if there is any string
# to be replaced with Dim.AUTO, and then unflatten it back to the original structure.
# for example: {"y": {0: "dim_0"}, "x": {1: "dim_1"}}
# to {"y": {0: Dim.AUTO}, "x": {1: Dim.AUTO}}
dynamic_shapes_with_export_dim: list[
list[_Dim | _DimHint | None] | dict[int, _Dim | _DimHint | None] | None
] = []
flat_dynamic_shapes, tree_structure = _flatten_dynamic_shapes_to_axes(
dynamic_shapes
)
for axes in flat_dynamic_shapes:
if axes is None:
dynamic_shapes_with_export_dim.append(None)
elif isinstance(axes, dict):
converted_axes_dict: dict[int, _Dim | _DimHint | None] = {}
for axis, dim in axes.items():
if isinstance(dim, str):
converted_axes_dict[axis] = torch.export.Dim.AUTO # type: ignore[attr-defined]
else:
converted_axes_dict[axis] = dim
dynamic_shapes_with_export_dim.append(converted_axes_dict)
elif isinstance(axes, (list, tuple)):
converted_axes_list: list[_Dim | _DimHint | None] = []
for dim in axes:
if isinstance(dim, str):
converted_axes_list.append(torch.export.Dim.AUTO) # type: ignore[attr-defined]
else:
converted_axes_list.append(dim)
dynamic_shapes_with_export_dim.append(converted_axes_list)
dynamic_shapes_with_export_dim = _pytree.tree_unflatten(
dynamic_shapes_with_export_dim, tree_structure
)
return (
dynamic_shapes_with_export_dim,
True,
)
def create_rename_mapping(
inputs, dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any]
) -> dict[str, str]:
"""Create a mapping from old names to new names for dynamic axes."""
# NOTE: There's no need to handle cases where kwargs are out of order with the model signature,
# as torch.export.export supports dynamism only when kwargs and dynamic_shapes are provided in order.
# Reference: https://github.com/pytorch/pytorch/blob/49082f9dba3b79a344cb03652972ddbe7c3729cc/torch/export/_trace.py#L2034
flat_dynamic_shapes, _ = _flatten_dynamic_shapes_to_axes(dynamic_shapes)
if len(inputs) != len(flat_dynamic_shapes):
warnings.warn(
"# ONNX model has different number of inputs than the flatten dynamic_shapes. "
"The dynamic axes will not be renamed.",
UserWarning,
stacklevel=3,
)
return {}
rename_mapping: dict[str, str] = {}
# NOTE: We assume that the flat_dynamic_shapes is in the same order as the inputs
# When the axis is static, or it connects to _DimHint in dynamic shapes, we skip renaming
for idx, axes in enumerate(flat_dynamic_shapes):
input = inputs[idx]
if isinstance(axes, dict):
for dim, axis in axes.items():
if not isinstance(input.shape[dim], ir.SymbolicDim):
continue
old_name = input.shape[dim].value
if old_name is None:
continue
                # _DimHint, int, and None may appear in dynamic shapes; we skip renaming those
if isinstance(axis, (_DimHint, int)) or axis is None:
continue
# NOTE: ExportedProgram could give the axes the same name if they share
# the same shape constraints.
custom_name = _get_custom_axis_name(axis)
if input.shape[dim].value in rename_mapping:
warnings.warn(
f"# The axis name: {custom_name} will not be used, since it shares "
f"the same shape constraints with another axis: {rename_mapping[input.shape[dim].value]}."
)
continue
rename_mapping[input.shape[dim].value] = custom_name
elif isinstance(axes, (list, tuple)):
for dim, axis in enumerate(axes):
if not isinstance(input.shape[dim], ir.SymbolicDim):
continue
old_name = input.shape[dim].value
if old_name is None:
continue
                # _DimHint, int, and None may appear in dynamic shapes; we skip renaming those
if isinstance(axis, (_DimHint, int)) or axis is None:
continue
# NOTE: ExportedProgram could give the axes the same name if they share
# the same shape constraints.
custom_name = _get_custom_axis_name(axis)
if input.shape[dim].value in rename_mapping:
warnings.warn(
f"# The axis name: {custom_name} will not be used, since it shares "
f"the same shape constraints with another axis: {rename_mapping[input.shape[dim].value]}.",
UserWarning,
stacklevel=3,
)
continue
                rename_mapping[input.shape[dim].value] = custom_name
return rename_mapping
def _get_custom_axis_name(axis: _Dim | str) -> str:
"""Get the custom axis name from a torch.export.Dim."""
if isinstance(axis, _Dim):
return axis.__name__
return axis
def _unflatten_dynamic_shapes_with_inputs_tree(
inputs: list[Any],
dynamic_shapes: dict[str, Any],
) -> dict[str, Any | None]:
_, tree_structure = _pytree.tree_flatten(inputs)
return _pytree.tree_unflatten(dynamic_shapes.values(), tree_structure)
def _flatten_dynamic_shapes_to_axes(
dynamic_shapes: dict[str, Any | None] | tuple[Any, ...] | list[Any],
) -> tuple[list[Any], _pytree.TreeSpec]:
    # If it's a dict/list/tuple with torch.export._Dim, we consider it an axis-to-dim mapping
def is_axes(x) -> bool:
return (
isinstance(x, dict)
and all(
isinstance(k, int)
and (v is None or isinstance(v, (_Dim, _DimHint, str, int)))
for k, v in x.items()
)
) or (
isinstance(x, (list, tuple))
and all(v is None or isinstance(v, (_Dim, _DimHint, str, int)) for v in x)
)
return _pytree.tree_flatten(dynamic_shapes, is_leaf=is_axes)
def _signature(model) -> inspect.Signature:
should_be_callable = getattr(model, "forward", model)
if callable(should_be_callable):
return inspect.signature(should_be_callable)
raise ValueError("model has no forward method and is not callable")
```
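A hedged usage sketch for `from_dynamic_axes_to_dynamic_shapes` (internal API; the import path matches the PATH header above). Both the `dict[int, str]` and `list[int]` axis forms collapse to `Dim.AUTO`:

```py
import torch
from torch.onnx._internal.exporter import _dynamic_shapes

class Model(torch.nn.Module):
    def forward(self, x, y):
        return x + y

args = (torch.randn(2, 3), torch.randn(2, 3))
dynamic_axes = {"x": {0: "batch"}, "y": [0]}
shapes, new_args, new_kwargs = _dynamic_shapes.from_dynamic_axes_to_dynamic_shapes(
    Model(), args, None, dynamic_axes=dynamic_axes, output_names=set()
)
# Axis 0 of each input is now mapped to torch.export.Dim.AUTO, arranged in the
# tree structure of the model inputs.
print(shapes)
```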
|
================================================================================================================================
SOURCE CODE FILE: _errors.py
LINES: 1
SIZE: 0.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_errors.py
ENCODING: utf-8
```py
"""Error classes for the ONNX exporter."""
from __future__ import annotations
import torch.onnx.errors
class TorchExportError(torch.onnx.errors.OnnxExporterError):
"""Error during graph capturing using torch.export."""
class ConversionError(torch.onnx.errors.OnnxExporterError):
"""Error during ExportedProgram to ONNX conversion."""
class DispatchError(ConversionError):
"""Error during ONNX Function dispatching."""
class GraphConstructionError(ConversionError):
"""Error during ONNX graph construction."""
```
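These classes form a small hierarchy under `torch.onnx.errors.OnnxExporterError`, so callers can catch broadly or narrowly. A short illustrative sketch:

```py
import torch
from torch.onnx._internal.exporter import _errors

try:
    raise _errors.DispatchError("no ONNX overload matched the node")
except _errors.ConversionError as e:
    # DispatchError is a ConversionError, which is an OnnxExporterError
    assert isinstance(e, torch.onnx.errors.OnnxExporterError)
    print(f"conversion failed: {e}")
```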
|
===================================================================================================================================
SOURCE CODE FILE: _fx_passes.py
LINES: 1
SIZE: 1.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_fx_passes.py
ENCODING: utf-8
```py
from __future__ import annotations
import torch
import torch.export
import torch.fx
from torch.onnx._internal.exporter import _decomp, _registration
from torch.onnx._internal.fx import diagnostics, passes
def decompose_with_registry(
exported_program: torch.export.ExportedProgram, registry: _registration.ONNXRegistry
) -> torch.export.ExportedProgram:
"""Decompose the exported program with the given registry.
    This function is needed so it shows up clearly in profiler results.
"""
onnx_registered_ops = set(_decomp.get_onnx_implemented_overloads(registry))
decomp_table = _decomp.create_onnx_friendly_decomposition_table(onnx_registered_ops)
return exported_program.run_decompositions(decomp_table)
def insert_type_promotion_nodes(
graph_module: torch.fx.GraphModule,
) -> None:
"""Inplace pass to insert explicit type promotion nodes, recursively through nested modules."""
for module in graph_module.modules():
assert isinstance(module, torch.fx.GraphModule)
diagnostic_context = diagnostics.DiagnosticContext(
"torch.onnx.export",
torch.__version__,
)
passes.InsertTypePromotion(diagnostic_context, module).run()
def remove_assertion_nodes(graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
"""Remove all assertion and check nodes from the FX graph"""
aten_assertion_targets = {
torch.ops.aten.sym_constrain_range_for_size.default,
torch.ops.aten._assert_async.default,
torch.ops.aten._assert_async.msg,
torch.ops.aten._assert_scalar.default,
torch.ops.aten._assert_tensor_metadata.default,
}
for node in graph_module.graph.nodes:
if node.op == "call_function" and node.target in aten_assertion_targets:
graph_module.graph.erase_node(node)
graph_module.recompile()
return graph_module
```
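A hedged sketch of chaining these passes on an exported program. For a model without runtime assertions, `remove_assertion_nodes` is a no-op; the sequencing is what matters:

```py
import torch
from torch.onnx._internal.exporter import _fx_passes, _registration

class Model(torch.nn.Module):
    def forward(self, x):
        return x.relu()

ep = torch.export.export(Model(), (torch.randn(2, 2),))
registry = _registration.ONNXRegistry.from_torchlib()
ep = _fx_passes.decompose_with_registry(ep, registry)
gm = _fx_passes.remove_assertion_nodes(ep.graph_module)
print(gm.graph)
```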
|
===================================================================================================================================
SOURCE CODE FILE: _ir_passes.py
LINES: 1
SIZE: 5.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_ir_passes.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import logging
import re
from typing import TYPE_CHECKING
from torch.onnx._internal._lazy_import import onnxscript_ir as ir
from torch.onnx._internal.exporter import _constants
if TYPE_CHECKING:
from collections.abc import Sequence
# The opset domain for ONNX operators
_ONNX_DOMAIN = ""
logger = logging.getLogger(__name__)
def rename_inputs(model: ir.Model, new_names: Sequence[str]) -> None:
# TODO: Ensure the names do not have duplicates
for input, new_name in zip(model.graph.inputs, new_names):
input.metadata_props["pkg.torch.onnx.original_node_name"] = str(input.name)
input.name = new_name
def rename_outputs(model: ir.Model, new_names: Sequence[str]) -> None:
for output, new_name in zip(model.graph.outputs, new_names):
output.metadata_props["pkg.torch.onnx.original_node_name"] = str(output.name)
output.name = new_name
def _all_values(model: ir.Model):
"""Yield all values in a model."""
# Yield all values in the model
yield from model.graph.inputs
yield from model.graph.initializers.values()
for node in ir.traversal.RecursiveGraphIterator(model.graph):
yield from node.outputs
# Yield all values in functions
for function in model.functions.values():
yield from function.inputs
for node in ir.traversal.RecursiveGraphIterator(function):
yield from node.outputs
def _replace_names(shape_expr: str, rename_mapping: dict[str, str]) -> str:
"""Replace all known names in a shape expression with new names."""
for old_name, new_name in rename_mapping.items():
shape_expr = re.sub(
rf"(?<!\w){re.escape(old_name)}(?!\w)", new_name, shape_expr
)
return shape_expr
def rename_axis(model: ir.Model, rename_mapping: dict[str, str]) -> None:
"""Rename dynamic axes in a model according to the specified dynamic_axes names."""
    # NOTE: The mapping needs to be sorted by length because a shape expression
    # could be expressed in multiple ways, for example,
    # {"s1": "sequence_length", "s11": "past_sequence_length", "s1 + s11": "masked_sequence_length"}
    # We prefer the replacement to start from the longest match.
sorted_rename_mapping = dict(
sorted(rename_mapping.items(), key=lambda item: len(item[0]), reverse=True)
)
for value in _all_values(model):
if value.shape is None:
continue
new_shape = []
changed = False
for dim in value.shape:
if not isinstance(dim, ir.SymbolicDim):
new_shape.append(dim)
continue
dim_name = dim.value
if dim_name in sorted_rename_mapping:
new_shape.append(sorted_rename_mapping[dim_name])
changed = True
elif dim_name is not None:
# For example: "2*s1", "s1+1", "s1-1", "s1*s2", "s1/s2"
new_name = _replace_names(dim_name, sorted_rename_mapping)
new_shape.append(new_name)
if new_name != dim_name:
changed = True
else:
new_shape.append(None)
if changed:
value.shape = ir.Shape(new_shape)
def add_torchlib_common_imports(model: ir.Model) -> None:
"""Hack to add torchlib common imports to the model."""
try:
        # TODO(justinchuby): Remove this hack and improve onnxscript
from onnxscript.function_libs.torch_lib.ops import common as common_ops
model.opset_imports["pkg.onnxscript.torch_lib.common"] = 1
rank_func = ir.serde.deserialize_function(common_ops.Rank.to_function_proto())
is_scalar_func = ir.serde.deserialize_function(
common_ops.IsScalar.to_function_proto()
)
model.functions[rank_func.identifier()] = rank_func
model.functions[is_scalar_func.identifier()] = is_scalar_func
except Exception:
logger.exception("Failed to add torchlib common imports to the model.")
def _maybe_set_opset_version(
opset_imports: dict[str, int], domain: str, version: int | None
) -> None:
"""Set the opset version for the domain."""
if domain in opset_imports and opset_imports[domain] != 1:
# Already set
return
if domain == _ONNX_DOMAIN:
opset_imports[domain] = _constants.TORCHLIB_OPSET
return
if version is None:
# We don't know the opset version, so set it to 1
# This is valid for the custom function domains like "pkg.torch.__subgraph__"
opset_imports[domain] = 1
return
# Set the known opset version for the domain
opset_imports[domain] = version
def add_opset_imports(model: ir.Model) -> None:
"""Collect all opsets used and add opset imports to the model and functions."""
for node in ir.traversal.RecursiveGraphIterator(model.graph):
domain = node.domain
_maybe_set_opset_version(model.opset_imports, domain, node.version)
for function in model.functions.values():
for node in ir.traversal.RecursiveGraphIterator(function):
domain = node.domain
_maybe_set_opset_version(function.opset_imports, domain, node.version)
for domain, version in function.opset_imports.items():
# Add all opsets used in the function to the model, because ONNX Runtime
# does not handle adding the opset imports to the model after inlining during inference.
# This should happen after all opsets are collected for the function from its nodes.
_maybe_set_opset_version(model.opset_imports, domain, version)
```
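The whole-word lookarounds in `_replace_names` are what make the longest-match sorting in `rename_axis` safe: `s1` never matches inside `s11`. A small illustrative check (calling the private helper directly):

```py
from torch.onnx._internal.exporter import _ir_passes

mapping = {"s11": "past_sequence_length", "s1": "sequence_length"}
# The (?<!\w) ... (?!\w) lookarounds keep "s1" from matching inside "s11".
print(_ir_passes._replace_names("s1 + s11", mapping))
# -> "sequence_length + past_sequence_length"
```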
|
==================================================================================================================================
SOURCE CODE FILE: _isolated.py
LINES: 1
SIZE: 1.58 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_isolated.py
ENCODING: utf-8
```py
"""Isolated calls to methods that may segfault."""
# mypy: allow-untyped-defs
from __future__ import annotations
import multiprocessing
import os
import warnings
from typing import Callable
_IS_WINDOWS = os.name == "nt"
def _call_function_and_return_exception(func, args, kwargs):
"""Call function and return a exception if there is one."""
try:
return func(*args, **kwargs)
except Exception as e:
return e
def safe_call(func: Callable, *args, **kwargs):
"""Call a function in a separate process.
Args:
func: The function to call.
args: The positional arguments to pass to the function.
kwargs: The keyword arguments to pass to the function.
Returns:
The return value of the function.
Raises:
Exception: If the function raised an exception.
"""
if _IS_WINDOWS:
# On Windows, we cannot create a new process with fork.
warnings.warn(
f"A new process is not created for {func} on Windows.", stacklevel=1
)
return func(*args, **kwargs)
with multiprocessing.get_context("fork").Pool(1) as pool:
# It is important to fork a process here to prevent the main logic from
# running again when the user does not place it under a `if __name__ == "__main__":`
# block.
result = pool.apply_async(
_call_function_and_return_exception, (func, args, kwargs)
)
result = result.get(timeout=5)
if isinstance(result, Exception):
raise result
return result
```
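A hedged sketch of `safe_call`: the callable runs in a forked worker so a segfaulting native call cannot take down the exporter process, and any Python exception is re-raised in the parent. On Windows it falls back to an in-process call with a warning.

```py
from torch.onnx._internal.exporter import _isolated

def fragile(x):
    if x < 0:
        raise ValueError("negative input")
    return x * 2

print(_isolated.safe_call(fragile, 21))  # 42
try:
    _isolated.safe_call(fragile, -1)  # the exception is re-raised in the parent
except ValueError as e:
    print("caught:", e)
```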
|
======================================================================================================================================
SOURCE CODE FILE: _onnx_program.py
LINES: 1
SIZE: 15.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_onnx_program.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# mypy: disable-error-code="attr-defined,name-defined"
from __future__ import annotations
__all__ = ["ONNXProgram"]
import contextlib
import copy
import gc
import logging
import os
import tempfile
import textwrap
import warnings
from typing import Any, Callable, TYPE_CHECKING
import torch
from torch.onnx._internal._lazy_import import onnx, onnxscript_apis, onnxscript_ir as ir
from torch.onnx._internal.exporter import _dynamic_shapes, _ir_passes
from torch.utils import _pytree
# NOTE: DO NOT import module from torch.onnx._internal to this module in the global scope
# because ONNXProgram is exposed to the public API
if TYPE_CHECKING:
from collections.abc import Sequence
import onnxruntime as ort
_LARGE_MODEL_THRESHOLD = 1536 * 1024 * 1024 # 1536MB
logger = logging.getLogger(__name__)
def _ort_session_initializer(model: str | bytes) -> ort.InferenceSession:
"""Initialize an ONNX Runtime inference session with the specified model."""
import onnxruntime as ort
session_options = ort.SessionOptions()
session_options.log_severity_level = 3 # 3: Error
possible_providers = (
"CUDAExecutionProvider",
"CPUExecutionProvider",
)
available_providers = set(ort.get_available_providers())
providers = [
provider for provider in possible_providers if provider in available_providers
]
return ort.InferenceSession(
model, providers=providers, sess_options=session_options
)
def _count_initializer_size(graph: ir.Graph) -> int:
"""Count the total size of the initializers in bytes."""
return sum(
v.const_value.nbytes
for v in graph.initializers.values()
if v.const_value is not None
)
@contextlib.contextmanager
def _set_graph_outputs(
graph: ir.Graph,
outputs: list[ir.Value],
):
"""Temporarily set the outputs of the graph.
Args:
graph: The graph to set the outputs for.
outputs: The outputs to set.
"""
original_outputs = graph.outputs.copy()
graph.outputs.clear()
graph.outputs.extend(outputs)
try:
yield
finally:
graph.outputs.clear()
graph.outputs.extend(original_outputs)
def _create_value_mapping(graph: ir.Graph) -> dict[str, ir.Value]:
"""Return a dictionary mapping names to values in the graph.
The mapping does not include values from subgraphs.
Args:
graph: The graph to extract the mapping from.
Returns:
A dictionary mapping names to values.
"""
values = {}
values.update(graph.initializers)
# The names of the values can be None or "", which we need to exclude
for input in graph.inputs:
if not input.name:
continue
values[input.name] = input
for node in graph:
for value in node.outputs:
if not value.name:
continue
values[value.name] = value
return values
class ONNXProgram:
"""A class to represent an ONNX program that is callable with torch tensors."""
def __init__(
self, model: ir.Model, exported_program: torch.export.ExportedProgram | None
):
"""Initialize the ONNX program with the specified model and exported program.
Args:
model: The ONNX model.
exported_program: The exported program that produced the ONNX model. Optional.
"""
self.model: ir.Model = model
self.exported_program = exported_program
self._inference_session: ort.InferenceSession | None = None
self._tempdir: tempfile.TemporaryDirectory | None = None
# Strategy used to capture the exported program
self._capture_strategy: str | None = None
def __repr__(self) -> str:
return f"""\
ONNXProgram(
model=
{textwrap.indent(str(self.model), " " * 8)}
,
exported_program=
{textwrap.indent(str(self.exported_program), " " * 8)}
)
"""
def __call__(self, *args, **kwargs) -> Sequence[torch.Tensor]:
"""Run the ONNX model with the same arguments you would provide to the GraphModule."""
import onnxruntime as ort
flatten_args = _process_args(args, kwargs)
if self._inference_session is None:
self.initialize_inference_session()
assert self._inference_session is not None
# We don't expect non-tensor as inputs
ort_input = {
k.name: v.numpy(force=True)
for k, v in zip(self.model.graph.inputs, flatten_args)
}
run_options = ort.RunOptions()
run_options.log_severity_level = 3 # 3: Error
logger.debug("Running the inference session with %s arguments.", len(ort_input))
outputs = self._inference_session.run(None, ort_input, run_options=run_options)
logger.debug("Inference session run completed.")
# TODO(justinchuby): Maybe output complex tensors as needed
return tuple(torch.from_numpy(output) for output in outputs)
def compute_values(
self, value_names: Sequence[str], args=(), kwargs=None
) -> Sequence[torch.Tensor]:
"""Compute the values of the specified names in the ONNX model.
This method is used to compute the values of the specified names in the ONNX model.
The values are returned as a dictionary mapping names to tensors.
Args:
value_names: The names of the values to compute.
Returns:
A dictionary mapping names to tensors.
"""
if kwargs is None:
kwargs = {}
self.release()
values = _create_value_mapping(self.model.graph)
for name in value_names:
if name not in values:
raise ValueError(
f"Value '{name}' not found in the model. "
"Please provide a valid value name."
)
temporary_outputs = [values[name] for name in value_names]
with _set_graph_outputs(self.model.graph, temporary_outputs):
try:
result = self(*args, **kwargs)
finally:
self.release()
return result
@property
def model_proto(self) -> onnx.ModelProto:
"""Return the ONNX ``ModelProto`` object."""
return ir.serde.serialize_model(self.model)
def optimize(self) -> None:
"""Optimize the ONNX model.
This method optimizes the ONNX model by performing constant folding and
eliminating redundancies in the graph. The optimization is done in-place.
"""
self.model = onnxscript_apis.optimize(self.model)
def save(
self,
destination: str | os.PathLike,
*,
include_initializers: bool = True,
keep_initializers_as_inputs: bool = False,
external_data: bool | None = None,
):
"""Save the ONNX model to the specified destination.
        When ``external_data`` is ``True`` or the total initializer size exceeds the
        large-model threshold (1536MB), the weights are saved as external data in a separate file.
Initializer (model weights) serialization behaviors:
* ``include_initializers=True``, ``keep_initializers_as_inputs=False`` (default):
The initializers are included in the saved model.
* ``include_initializers=True``, ``keep_initializers_as_inputs=True``:
The initializers are included in the saved model and kept as model inputs.
Choose this option if you want the ability to override the model weights
during inference.
* ``include_initializers=False``, ``keep_initializers_as_inputs=False``:
The initializers are not included in the saved model and are not listed
as model inputs. Choose this option if you want to attach the initializers
to the ONNX model in a separate, post-processing, step.
* ``include_initializers=False``, ``keep_initializers_as_inputs=True``:
The initializers are not included in the saved model but are listed as model
inputs. Choose this option if you want to supply the initializers during
inference and want to minimize the size of the saved model.
Args:
destination: The path to save the ONNX model to.
include_initializers: Whether to include the initializers in the saved model.
keep_initializers_as_inputs: Whether to keep the initializers as inputs in the saved model.
                If `True`, the initializers are added as inputs to the model, which means they can be
                overwritten by providing the initializers as model inputs.
external_data: Whether to save the weights as external data in a separate file.
Raises:
TypeError: If ``external_data`` is ``True`` and ``destination`` is not a file path.
"""
original_initializers = copy.copy(self.model.graph.initializers)
original_inputs = copy.copy(self.model.graph.inputs)
# Adjust the model based on options
if not include_initializers:
self.model.graph.initializers.clear()
if keep_initializers_as_inputs:
self.model.graph.inputs.extend(original_initializers.values()) # type: ignore[arg-type]
# Save the model to disk
if (
external_data
or _count_initializer_size(self.model.graph) > _LARGE_MODEL_THRESHOLD
):
onnxscript_apis.save_model_with_external_data(self.model, destination)
else:
ir.save(self.model, destination)
# Revert the changes to the model
if not include_initializers:
self.model.graph.initializers.update(original_initializers)
if keep_initializers_as_inputs:
self.model.graph.inputs.clear()
self.model.graph.inputs.extend(original_inputs)
def apply_weights(self, state_dict: dict[str, torch.Tensor]) -> None:
"""Apply the weights from the specified state dict to the ONNX model.
Use this method to replace FakeTensors or other weights.
Args:
state_dict: The state dict containing the weights to apply to the ONNX model.
"""
from torch.onnx._internal.exporter import _core
for name, tensor in state_dict.items():
if name in self.model.graph.initializers:
self.model.graph.initializers[name].const_value = _core.TorchTensor(
tensor, name
)
else:
warnings.warn(
f"Weight '{name}' not found in the model. Skipped applying.",
category=torch.onnx.errors.OnnxExporterWarning,
stacklevel=1,
)
def initialize_inference_session(
self,
initializer: Callable[
[str | bytes], ort.InferenceSession
] = _ort_session_initializer,
) -> None:
"""Initialize the ONNX Runtime inference session.
Args:
initializer: The function to initialize the ONNX Runtime inference
session with the specified model. By default, it uses the
:func:`_ort_session_initializer` function.
"""
# TODO(justinchuby): Allow different inference options
logger.debug("Initializing the inference session.")
if (
byte_size := _count_initializer_size(self.model.graph)
) > _LARGE_MODEL_THRESHOLD:
logger.debug("The model initializers is larger than 1.5GB (%s).", byte_size)
# Save the model to a temporary file if too large
self._tempdir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)
model_path = os.path.join(self._tempdir.name, "model.onnx")
self.save(model_path, external_data=True)
model = model_path
else:
model = self.model_proto.SerializeToString() # type: ignore[assignment]
self._inference_session = initializer(model)
logger.debug("Inference session initialized.")
def release(self) -> None:
"""Release the inference session.
You may call this method to release the resources used by the inference session.
"""
# Release the inference session first so that the model file can be deleted
if self._inference_session is not None:
self._inference_session = None
gc.collect()
if self._tempdir is not None:
self._tempdir.cleanup()
self._tempdir = None
def _rename_dynamic_axes(
self,
dynamic_shapes: dict[str, Any] | tuple[Any, ...] | list[Any],
) -> None:
"""Rename dynamic axes in a model according to the specified dynamic_axes names."""
rename_mapping = _dynamic_shapes.create_rename_mapping(
self.model.graph.inputs, dynamic_shapes
)
_ir_passes.rename_axis(self.model, rename_mapping)
def _process_args(args, kwargs) -> tuple[torch.Tensor, ...]:
"""Process input arguments for the ONNX model."""
args = _flatten_inputs(args, kwargs)
args = _remove_none_from_inputs(args)
args = _remove_non_tensor(args)
args = _convert_complex_to_real_representation(args)
return args
def _flatten_inputs(model_args, model_kwargs):
flattened_args, _ = _pytree.tree_flatten((model_args, model_kwargs))
return flattened_args
def _remove_none_from_inputs(model_args):
return tuple(arg for arg in model_args if arg is not None)
def _remove_non_tensor(model_args):
"""Remove the non-tensor input arguments.
Dynamo does not support non-tensor input arguments (https://github.com/pytorch/pytorch/issues/99534).
    Specifically, it puts the input into the graph as an empty node that nothing consumes.
    The concrete value is embedded into the graph as a constant arg of a target node. Meta
    suggests in this case that one should rewrite the model code to make the input a tensor if
    its value is supposed to change at runtime. We might need to further investigate
    the feasibility of that suggestion.
For example,
def func(x, b=1.0):
y = x + b
z = y.relu()
return (y, z)
x = torch.randn(1, 1, 2, dtype=torch.float32)
gm_fun, _ = dynamo.export(func, x, b=8.0, aten_graph=True, tracing_mode="real")
# class GraphModule(torch.nn.Module):
# def forward(self, x, b):
# arg0: f32[1, 1, 2], arg1, = fx_pytree.tree_flatten_spec(([x, b], {}), self._in_spec)
# # File: path/to/pytorch/test_constant_input.py:5, code: y = x + b
# add_tensor: f32[1, 1, 2] = torch.ops.aten.add.Tensor(arg0, 8.0); arg0 = None
# # File: path/to/pytorch/test_constant_input.py:6, code: z = y.relu()
# relu_default: f32[1, 1, 2] = torch.ops.aten.relu.default(add_tensor)
# return pytree.tree_unflatten([add_tensor, relu_default], self._out_spec)
    The empty torch.fx.Node input leads to a mismatched number of inputs with PyTorch, as
    it is ignored in the ONNX graph. Thus, we delete the useless input here.
"""
return tuple(
arg for arg in model_args if not isinstance(arg, (int, float, bool, str))
)
def _convert_complex_to_real_representation(model_args):
"""Convert complex dtype tensors to real representation tensors.
ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors
to real representation tensors (i.e., float dtype tensors with an extra dimension
representing the real and imaginary parts of the complex number).
"""
return tuple(
torch.view_as_real(arg.resolve_conj())
if isinstance(arg, torch.Tensor) and arg.is_complex()
else arg
for arg in model_args
)
```
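A hedged sketch of the typical `ONNXProgram` round trip. In recent PyTorch releases, `torch.onnx.export(..., dynamo=True)` returns an `ONNXProgram`; calling it runs the model with ONNX Runtime (requires the `onnxruntime` package):

```py
import torch

class Model(torch.nn.Module):
    def forward(self, x):
        return x.sigmoid()

onnx_program = torch.onnx.export(Model(), (torch.randn(2, 3),), dynamo=True)
outputs = onnx_program(torch.randn(2, 3))  # runs with ONNX Runtime
onnx_program.optimize()                    # constant folding etc., in place
onnx_program.save("model.onnx")
onnx_program.release()                     # free the inference session
```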
|
======================================================================================================================================
SOURCE CODE FILE: _registration.py
LINES: 1
SIZE: 11.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_registration.py
ENCODING: utf-8
```py
"""Module for handling ATen to ONNX functions registration.
https://github.com/pytorch/pytorch/blob/6aa5bb1a76dee8112f1a9e7c194c790b5cdc6462/torch/onnx/_internal/fx/registration.py
"""
# NOTE: Why do we need a different registry than the one in torchlib?
# The registry in torchlib is used to register functions that are already implemented in
# torchlib, and is designed to be a static singleton. It does not take into account custom ops or different
# opsets etc. The registry implemented for the exporter is designed to be modifiable at
# export time by users, and is designed with dispatching in mind.
# mypy: allow-untyped-defs
from __future__ import annotations
import dataclasses
import importlib.util
import logging
import math
import operator
import types
from typing import Callable, Literal, Union
from typing_extensions import TypeAlias
import torch
import torch._ops
from torch.onnx._internal._lazy_import import onnxscript, onnxscript_apis
from torch.onnx._internal.exporter import _constants, _schemas
from torch.onnx._internal.exporter._torchlib import _torchlib_registry
TorchOp: TypeAlias = Union[torch._ops.OpOverload, types.BuiltinFunctionType, Callable]
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class OnnxDecompMeta:
"""A wrapper of onnx-script function with additional metadata.
onnx_function: The onnx-script function from torchlib.
fx_target: The PyTorch node callable target.
signature: The ONNX signature of the function. When None, the signature is inferred.
is_custom: Whether the function is a custom function.
    is_complex: Whether the function handles complex-valued inputs.
device: The device the function is registered to. If None, it is registered to all devices.
skip_signature_inference: Whether to skip signature inference for the function.
"""
onnx_function: Callable
fx_target: TorchOp
signature: _schemas.OpSignature | None
is_custom: bool = False
is_complex: bool = False
device: Literal["cuda", "cpu"] | str | None = None # noqa: PYI051
skip_signature_inference: bool = False
def __post_init__(self) -> None:
if self.signature is None and not self.skip_signature_inference:
try:
if isinstance(self.onnx_function, onnxscript.OnnxFunction):
signature = _schemas.OpSignature.from_function( # type: ignore[attr-defined]
self.onnx_function,
self.onnx_function.function_ir.domain,
self.onnx_function.name,
opset_version=self.onnx_function.opset.version,
)
else:
signature = _schemas.OpSignature.from_function(
self.onnx_function, "__traced", self.onnx_function.__name__
)
except Exception as e:
                # Log a warning if the op is custom. Raise an exception for builtin ops.
if not self.is_custom:
raise
else:
# When the function is targeting an HOP, for example, it will accept
# functions as arguments and fail to generate an ONNX signature.
# In this case we set signature to None and dispatch to this function always.
logger.warning(
"Failed to infer the signature for function '%s' because '%s'"
"All nodes targeting `%s` will be dispatched to this function",
self.onnx_function,
e,
self.fx_target,
)
else:
self.signature = signature
self.onnx_function._pt_onnx_signature = signature # type: ignore[attr-defined]
def _get_overload(qualified_name: str) -> torch._ops.OpOverload | None:
"""Obtain the torch op from <namespace>::<op_name>[.<overload>]"""
# TODO(justinchuby): Handle arbitrary custom ops
namespace, opname_overload = qualified_name.split("::")
op_name, *maybe_overload = opname_overload.split(".", 1)
if namespace == "_operator":
# Builtin functions
return getattr(operator, op_name)
if namespace == "math":
return getattr(math, op_name)
if namespace == "torchvision":
if importlib.util.find_spec("torchvision") is None:
logger.warning("torchvision is not installed. Skipping %s", qualified_name)
return None
try:
op_packet = getattr(getattr(torch.ops, namespace), op_name)
if maybe_overload:
overload = maybe_overload[0]
elif "default" in op_packet._overload_names or "" in op_packet._overload_names:
# Has a default overload
overload = "default"
else:
logger.warning(
"'%s' does not have a 'default' overload. This could be an error in specifying the op name. Ignoring.",
qualified_name,
stacklevel=1,
)
return None
return getattr(op_packet, overload) # type: ignore[call-overload]
except AttributeError:
if qualified_name.endswith("getitem"):
# This is a special case where we registered the function incorrectly,
# but for BC reasons (pt<=2.4) we need to keep it.
return None
logger.info("'%s' is not found in this version of PyTorch.", qualified_name)
return None
except Exception:
logger.exception("Failed to find torch op '%s'", qualified_name)
return None
class ONNXRegistry:
"""Registry for ONNX functions.
The registry maintains a mapping from qualified names to symbolic functions under a
    fixed opset version. It supports registering custom onnx-script functions and
    lets the dispatcher dispatch calls to the appropriate function.
"""
def __init__(self) -> None:
"""Initializes the registry"""
self._opset_version = _constants.TORCHLIB_OPSET
self.functions: dict[TorchOp | str, list[OnnxDecompMeta]] = {}
@property
def opset_version(self) -> int:
"""The ONNX opset version the exporter should target."""
return self._opset_version
@classmethod
def from_torchlib(cls) -> ONNXRegistry:
"""Populates the registry with ATen functions from torchlib.
Args:
torchlib_registry: The torchlib registry to use for populating the registry.
"""
registry = cls()
for meta in _torchlib_registry.get_torchlib_ops():
registry._register(meta.fx_target, meta)
# TODO(justinchuby): Remove this once torchlib is migrated to PyTorch
torchlib_ops = onnxscript_apis.get_torchlib_ops()
for torchlib_meta in torchlib_ops:
qualified_name = torchlib_meta.qualified_name
overload_func = torchlib_meta.function
try:
# NOTE: This is heavily guarded with try-except because we don't want
# to fail the entire registry population if one function fails.
target = _get_overload(qualified_name)
if target is None:
continue
meta = OnnxDecompMeta(
onnx_function=overload_func,
fx_target=target,
signature=None,
is_custom=False,
is_complex=torchlib_meta.is_complex,
)
registry._register(target, meta)
except Exception:
logger.exception("Failed to register '%s'. Skipped", qualified_name)
continue
return registry
def _register(
self,
target: TorchOp,
onnx_decomposition: OnnxDecompMeta,
) -> None:
"""Registers a OnnxDecompMeta to an operator.
Args:
target: The PyTorch node callable target.
onnx_decomposition: The OnnxDecompMeta to register.
"""
target_or_name: str | TorchOp
if isinstance(target, torch._ops.OpOverload):
# Get the qualified name of the aten op because torch._ops.OpOverload lookup in
# a dictionary is unreliable for some reason.
target_or_name = target.name()
else:
target_or_name = target
if onnx_decomposition.is_custom:
self.functions.setdefault(target_or_name, []).insert(0, onnx_decomposition)
else:
self.functions.setdefault(target_or_name, []).append(onnx_decomposition)
def register_op(
self,
target: TorchOp,
function: Callable,
is_complex: bool = False,
) -> None:
"""Registers a custom operator: torch.ops.<namespace>.<op_name>.<overload>.
Args:
target: The PyTorch node callable target.
function: The onnx-script function to register.
            is_complex: Whether the function handles complex-valued inputs.
"""
if isinstance(target, torch._ops.OpOverloadPacket):
raise TypeError(
f"Target '{target}' should be provided as an OpOverload instead of an "
"OpOverloadPacket. You can get the default overload with "
"<op>.default"
)
self._register(
target,
OnnxDecompMeta(
onnx_function=function,
fx_target=target,
signature=None,
is_custom=True,
is_complex=is_complex,
),
)
def get_decomps(self, target: TorchOp) -> list[OnnxDecompMeta]:
"""Returns a list of OnnxDecompMeta for the given op: torch.ops.<namespace>.<op_name>.<overload>.
The list is ordered by the time of registration. The custom operators should come
first in the list.
Args:
target: The PyTorch node callable target.
Returns:
            A list of OnnxDecompMeta corresponding to the given name, or an empty list if
            the name is not in the registry.
"""
target_or_name: str | TorchOp
if isinstance(target, torch._ops.OpOverload):
# Get the qualified name of the aten op because torch._ops.OpOverload lookup in
# a dictionary is unreliable for some reason.
target_or_name = target.name()
else:
target_or_name = target
decomps = self.functions.get(target_or_name, [])
return sorted(decomps, key=lambda x: x.is_custom, reverse=True)
def is_registered(self, target: TorchOp) -> bool:
"""Returns whether the given op is registered: torch.ops.<namespace>.<op_name>.<overload>.
Args:
target: The PyTorch node callable target.
Returns:
True if the given op is registered, otherwise False.
"""
return bool(self.get_decomps(target))
def __repr__(self) -> str:
return f"{self.__class__.__name__}(functions={self.functions})"
```
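A hedged sketch of registering a custom decomposition so the dispatcher prefers it over the torchlib implementation. The ONNX Script body is illustrative only (a crude sigmoid-based GELU approximation), not a faithful `aten::gelu` translation:

```py
import torch
from onnxscript import opset18 as op
from torch.onnx._internal.exporter import _registration

def custom_gelu(self, approximate: str = "none"):
    # Illustrative only: x * sigmoid(1.702 * x) approximates GELU.
    return op.Mul(self, op.Sigmoid(op.Mul(self, op.Constant(value_float=1.702))))

registry = _registration.ONNXRegistry.from_torchlib()
registry.register_op(torch.ops.aten.gelu.default, custom_gelu)
# Custom registrations sort first, so the dispatcher prefers them:
print(registry.get_decomps(torch.ops.aten.gelu.default)[0].is_custom)  # True
```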
|
===================================================================================================================================
SOURCE CODE FILE: _reporting.py
LINES: 62
SIZE: 7.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_reporting.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import dataclasses
import re
from typing import TYPE_CHECKING
from torch.onnx._internal.exporter import _analysis, _registration, _verification
if TYPE_CHECKING:
import os
from onnxscript import ir
import torch
@dataclasses.dataclass
class ExportStatus:
    # Whether torch.export.export() succeeds
torch_export: bool | None = None
    # Whether torch.export.export(..., strict=False) succeeds
torch_export_non_strict: bool | None = None
# Whether torch.jit.trace succeeds
torch_jit: bool | None = None
# Whether decomposition succeeds
decomposition: bool | None = None
# Whether ONNX translation succeeds
onnx_translation: bool | None = None
# Whether ONNX model passes onnx.checker.check_model
onnx_checker: bool | None = None
# Whether ONNX model runs successfully with ONNX Runtime
onnx_runtime: bool | None = None
# Whether the output of the ONNX model is accurate
output_accuracy: bool | None = None
def _status_emoji(status: bool | None) -> str:
if status is None:
return "⚪"
return "✅" if status else "❌"
def _format_export_status(status: ExportStatus) -> str:
return (
f"```\n"
f"{_status_emoji(status.torch_export_non_strict)} Obtain model graph with `torch.export.export(..., strict=False)`\n"
f"{_status_emoji(status.torch_export)} Obtain model graph with `torch.export.export(..., strict=True)`\n"
f"{_status_emoji(status.torch_jit)} Obtain model graph with `torch.jit.trace`\n"
f"{_status_emoji(status.decomposition)} Decompose operators for ONNX compatibility\n"
f"{_status_emoji(status.onnx_translation)} Translate the graph into ONNX\n"
f"{_status_emoji(status.onnx_checker)} Run `onnx.checker` on the ONNX model\n"
f"{_status_emoji(status.onnx_runtime)} Execute the model with ONNX Runtime\n"
f"{_status_emoji(status.output_accuracy)} Validate model output accuracy\n"
f"```\n\n"
)
def _strip_color_from_string(text: str) -> str:
# This regular expression matches ANSI escape codes
# https://github.com/pytorch/pytorch/blob/9554a9af8788c57e1c5222c39076a5afcf0998ae/torch/_dynamo/utils.py#L2785-L2788
ansi_escape = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]")
return ansi_escape.sub("", text)
def _format_exported_program(exported_program: torch.export.ExportedProgram) -> str:
# Adapted from https://github.com/pytorch/pytorch/pull/128476
# to remove colors
# Even though we can call graph_module.print_readable directly, since the
# colored option was added only recently, we can't guarantee that the
# version of PyTorch used by the user has this option. Therefore, we
# still call str(ExportedProgram)
text = f"```python\n{_strip_color_from_string(str(exported_program))}\n```\n\n"
return text
def construct_report_file_name(timestamp: str, status: ExportStatus) -> str:
# Status could be None. So we need to check for False explicitly.
if not (status.torch_export or status.torch_export_non_strict or status.torch_jit):
# All strategies failed
postfix = "pt_export"
elif status.decomposition is False:
postfix = "decomp"
elif status.onnx_translation is False:
postfix = "conversion"
elif status.onnx_checker is False:
postfix = "checker"
elif status.onnx_runtime is False:
postfix = "runtime"
elif status.output_accuracy is False:
postfix = "accuracy"
elif status.torch_export is False or status.torch_export_non_strict is False:
# Some strategies failed
postfix = "strategies"
else:
postfix = "success"
return f"onnx_export_{timestamp}_{postfix}.md"
def format_decomp_comparison(
pre_decomp_unique_ops: set[str],
post_decomp_unique_ops: set[str],
) -> str:
"""Format the decomposition comparison result.
Args:
        pre_decomp_unique_ops: The unique ops in the ExportedProgram before decomposition.
        post_decomp_unique_ops: The unique ops in the ExportedProgram after decomposition.
Returns:
The formatted comparison result.
"""
return (
f"Ops exist only in the ExportedProgram before decomposition: `{sorted(pre_decomp_unique_ops)}`\n\n"
f"Ops exist only in the ExportedProgram after decomposition: `{sorted(post_decomp_unique_ops)}`\n"
)
def format_verification_infos(
verification_infos: list[_verification.VerificationInfo],
) -> str:
"""Format the verification result.
Args:
verification_infos: The verification result.
Returns:
The formatted verification result.
"""
return "\n".join(
f"`{info.name}`: `max_abs_diff={info.max_abs_diff:e}`, `max_rel_diff={info.max_rel_diff:e}`, "
f"`abs_diff_hist={info.abs_diff_hist}`, `rel_diff_hist={info.rel_diff_hist}`"
for info in verification_infos
)
def create_torch_export_error_report(
filename: str | os.PathLike,
formatted_traceback: str,
*,
export_status: ExportStatus,
profile_result: str | None,
):
with open(filename, "w", encoding="utf-8") as f:
f.write("# PyTorch ONNX Conversion Error Report\n\n")
f.write(_format_export_status(export_status))
f.write("Error message:\n\n")
f.write("```pytb\n")
f.write(formatted_traceback)
f.write("```\n\n")
if profile_result is not None:
f.write("## Profiling result\n\n")
f.write("```\n")
f.write(profile_result)
f.write("```\n")
def create_onnx_export_report(
filename: str | os.PathLike,
formatted_traceback: str,
program: torch.export.ExportedProgram,
*,
decomp_comparison: str | None = None,
export_status: ExportStatus,
profile_result: str | None,
model: ir.Model | None = None,
registry: _registration.ONNXRegistry | None = None,
verification_result: str | None = None,
):
with open(filename, "w", encoding="utf-8") as f:
f.write("# PyTorch ONNX Conversion Report\n\n")
f.write(_format_export_status(export_status))
f.write("## Error messages\n\n")
f.write("```pytb\n")
f.write(formatted_traceback)
f.write("\n```\n\n")
f.write("## Exported program\n\n")
f.write(_format_exported_program(program))
if model is not None:
f.write("## ONNX model\n\n")
f.write("```python\n")
f.write(str(model))
f.write("\n```\n\n")
f.write("## Analysis\n\n")
_analysis.analyze(program, file=f, registry=registry)
if decomp_comparison is not None:
f.write("\n## Decomposition comparison\n\n")
f.write(decomp_comparison)
f.write("\n")
if verification_result is not None:
f.write("\n## Verification results\n\n")
f.write(verification_result)
f.write("\n")
if profile_result is not None:
f.write("\n## Profiling result\n\n")
f.write("```\n")
f.write(profile_result)
f.write("```\n")
```
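A short sketch of how `ExportStatus` drives both the emoji checklist and the report file name; the values below mimic a run where translation to ONNX failed:

```py
from torch.onnx._internal.exporter import _reporting

status = _reporting.ExportStatus(
    torch_export_non_strict=True,
    decomposition=True,
    onnx_translation=False,  # translation failed
)
print(_reporting.construct_report_file_name("20240101_120000", status))
# -> onnx_export_20240101_120000_conversion.md
print(_reporting._format_export_status(status))
```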
|
=================================================================================================================================
SOURCE CODE FILE: _schemas.py
LINES: 1
SIZE: 20.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_schemas.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import collections.abc
import dataclasses
import inspect
import logging
import types
import typing
from collections.abc import Iterator, Mapping, Sequence
from typing import Any, Optional, TypeVar, Union
import onnx
import onnxscript
from onnxscript import ir
logger = logging.getLogger(__name__)
# A special value to indicate that the default value is not specified
class _Empty:
def __repr__(self):
return "_EMPTY_DEFAULT"
_EMPTY_DEFAULT = _Empty()
# Map from python type to corresponding ONNX AttributeProto type
_PY_TYPE_TO_ATTR_TYPE = {
float: ir.AttributeType.FLOAT,
int: ir.AttributeType.INT,
str: ir.AttributeType.STRING,
bool: ir.AttributeType.INT,
ir.Tensor: ir.AttributeType.TENSOR,
ir.TensorProtocol: ir.AttributeType.TENSOR,
ir.Graph: ir.AttributeType.GRAPH,
ir.GraphProtocol: ir.AttributeType.GRAPH,
}
# Map from python type to corresponding ONNX AttributeProto type,
# for repeated (i.e., list of) values
_LIST_TYPE_TO_ATTR_TYPE = {
float: ir.AttributeType.FLOATS,
int: ir.AttributeType.INTS,
str: ir.AttributeType.STRINGS,
bool: ir.AttributeType.INTS,
ir.Tensor: ir.AttributeType.TENSORS,
ir.TensorProtocol: ir.AttributeType.TENSORS,
ir.Graph: ir.AttributeType.GRAPHS,
ir.GraphProtocol: ir.AttributeType.GRAPHS,
}
_ALL_VALUE_TYPES = (
{ir.TensorType(dtype) for dtype in ir.DataType}
| {ir.SequenceType(ir.TensorType(dtype)) for dtype in ir.DataType}
| {ir.OptionalType(ir.TensorType(dtype)) for dtype in ir.DataType}
)
# TypeAnnotationValue represents the (value of) valid type-annotations recognized
# by ONNX Script. Currently, it supports
# - float, int, str (primitive attribute types)
# - Sequence[float], Sequence[int], Sequence[str] (attribute types)
# - Tensor types
# - Sequence[Tensor] types
# - Union of above 2
# - TypeVars with above bounds
# - Above types with annotation attached
TypeAnnotationValue = Any
@dataclasses.dataclass(frozen=True)
class TypeConstraintParam:
"""Type constraint for a parameter.
Attributes:
name: Name of the parameter. E.g. "TFloat"
allowed_types: Allowed types for the parameter.
"""
name: str
allowed_types: set[ir.TypeProtocol]
description: str = ""
def __hash__(self) -> int:
return hash((self.name, tuple(self.allowed_types)))
def __str__(self) -> str:
allowed_types_str = " | ".join(str(t) for t in self.allowed_types)
return f"{self.name}={allowed_types_str}"
@classmethod
def any_tensor(cls, name: str, description: str = "") -> TypeConstraintParam:
return cls(name, {ir.TensorType(dtype) for dtype in ir.DataType}, description)
@classmethod
def any_value(cls, name: str, description: str = "") -> TypeConstraintParam:
return cls(name, _ALL_VALUE_TYPES, description) # type: ignore[arg-type]
@dataclasses.dataclass(frozen=True)
class Parameter:
"""A formal parameter of an operator."""
name: str
type_constraint: TypeConstraintParam
required: bool
variadic: bool
default: Any = _EMPTY_DEFAULT
# TODO: Add other properties too
def __str__(self) -> str:
type_str = self.type_constraint.name
if self.has_default():
return f"{self.name}: {type_str} = {self.default}"
return f"{self.name}: {type_str}"
def has_default(self) -> bool:
return self.default is not _EMPTY_DEFAULT
@dataclasses.dataclass(frozen=True)
class AttributeParameter:
"""A parameter in the function signature that represents an ONNX attribute."""
name: str
type: ir.AttributeType
required: bool
default: ir.Attr | None = None
def __str__(self) -> str:
type_str = self.type.name
if self.has_default():
return f"{self.name}: {type_str} = {self.default}"
return f"{self.name}: {type_str}"
def has_default(self) -> bool:
return self.default is not None
def _get_type_from_str(
type_str: str,
) -> ir.TensorType | ir.SequenceType | ir.OptionalType:
"""Converter a type_str from ONNX Opschema to ir.TypeProtocol.
A type str has the form of "tensor(float)" or composite type like "seq(tensor(float))".
"""
# TODO: Upstream this to IR
    # Split the type_str into its nested type constructors and the dtype
# 1. Remove the ending ")"
    stripped = type_str.rstrip(")")
    # 2. Split the type_str by "("
    type_parts = stripped.split("(")
# Convert the dtype to ir.DataType
dtype = ir.DataType[type_parts[-1].upper()]
    # Create a placeholder type first
type_: ir.TypeProtocol = ir.TensorType(ir.DataType.UNDEFINED)
# Construct the type
for type_part in reversed(type_parts[:-1]):
if type_part == "tensor":
type_ = ir.TensorType(dtype)
elif type_part == "seq":
type_ = ir.SequenceType(type_)
elif type_part == "optional":
type_ = ir.OptionalType(type_)
else:
raise ValueError(f"Unknown type part: '{type_part}' in type '{type_str}'")
return type_ # type: ignore[return-value]
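# Example (illustrative): _get_type_from_str("seq(tensor(float))") returns
# ir.SequenceType(ir.TensorType(ir.DataType.FLOAT)).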
def _convert_formal_parameter(
param: onnx.defs.OpSchema.FormalParameter,
type_constraints: Mapping[str, TypeConstraintParam],
) -> Parameter:
"""Convert a formal parameter from ONNX Opschema to Parameter."""
if param.type_str in type_constraints:
type_constraint = type_constraints[param.type_str]
else:
# param.type_str can be a plain type like 'int64'.
type_constraint = TypeConstraintParam(
name=param.name,
allowed_types={_get_type_from_str(param.type_str)},
)
return Parameter(
name=param.name,
type_constraint=type_constraint,
required=param.option != onnx.defs.OpSchema.FormalParameterOption.Optional,
variadic=param.option == onnx.defs.OpSchema.FormalParameterOption.Variadic,
)
def _is_optional(type_: type) -> bool:
"""Returns whether a type_ is an Optional."""
origin_type = typing.get_origin(type_)
    if origin_type is Union and type(None) in typing.get_args(type_):
        # Optional[X] / Union[X, None] (all Python versions)
        return True
    if origin_type is Optional:
        # Defensive check; typing.get_origin normalizes Optional[X] to Union
        return True
if (
hasattr(types, "UnionType")
and origin_type is types.UnionType
and type(None) in typing.get_args(type_)
):
# Python >= 3.10
return True
return False
def _get_attr_type(type_: type) -> ir.AttributeType:
"""Obtain the type of the attribute from a Python class."""
try:
if type_ in _PY_TYPE_TO_ATTR_TYPE:
return _PY_TYPE_TO_ATTR_TYPE[type_]
origin_type = typing.get_origin(type_)
if origin_type is None:
return ir.AttributeType.UNDEFINED
        if origin_type in (
            collections.abc.Sequence,
            Sequence,
            list,
            tuple,
        ):
inner_type = typing.get_args(type_)[0]
if inner_type in _LIST_TYPE_TO_ATTR_TYPE:
return _LIST_TYPE_TO_ATTR_TYPE[inner_type]
except TypeError:
logger.warning("TypeError when checking %s.", type_, exc_info=True)
return ir.AttributeType.UNDEFINED
def _get_type_constraint_name(type_: TypeAnnotationValue) -> str | None:
"""Returns the name of the type constraint for a given type annotation.
Args:
type_: A Python type.
Returns:
        The name of the type constraint if the annotation is a TypeVar,
        prefixed with "Sequence_" if the annotation is a Sequence[...]; otherwise None.
"""
if isinstance(type_, TypeVar):
return type_.__name__
if _is_optional(type_):
subtypes = typing.get_args(type_)
for subtype in subtypes:
if subtype is type(None):
continue
type_param_name = _get_type_constraint_name(subtype)
return type_param_name if type_param_name else None
origin_type = typing.get_origin(type_)
if isinstance(origin_type, type) and issubclass(origin_type, Sequence):
subtypes = typing.get_args(type_)
type_param_name = _get_type_constraint_name(subtypes[0])
return f"Sequence_{type_param_name}" if type_param_name else None
return None
def _get_allowed_types_from_type_annotation(
type_: TypeAnnotationValue,
) -> set[ir.TypeProtocol]:
"""Obtain the allowed types from a type annotation."""
if type_ is onnxscript.onnx_types.TensorType:
# Any tensor type
return {ir.TensorType(dtype) for dtype in ir.DataType}
allowed_types: set[ir.TypeProtocol]
if isinstance(type_, TypeVar):
allowed_types = set()
if constraints := type_.__constraints__:
for constraint in constraints:
allowed_types.update(
_get_allowed_types_from_type_annotation(constraint)
)
else:
bound = type_.__bound__
if bound is None:
allowed_types = _ALL_VALUE_TYPES # type: ignore[assignment]
else:
allowed_types.update(_get_allowed_types_from_type_annotation(bound))
return allowed_types
if hasattr(type_, "dtype"):
# A single tensor type like INT64, FLOAT, etc.
return {ir.TensorType(ir.DataType(type_.dtype))}
if _is_optional(type_):
allowed_types = set()
subtypes = typing.get_args(type_)
for subtype in subtypes:
if subtype is type(None):
continue
allowed_types.update(_get_allowed_types_from_type_annotation(subtype))
# NOTE: We do not consider dynamic optional types like optional(float) because they are not very useful.
return allowed_types
origin_type = typing.get_origin(type_)
if origin_type is Union:
allowed_types = set()
subtypes = typing.get_args(type_)
for subtype in subtypes:
assert subtype is not type(None), (
"Union should not contain None type because it is handled by _is_optional."
)
allowed_types.update(_get_allowed_types_from_type_annotation(subtype))
return allowed_types
if isinstance(origin_type, type) and issubclass(origin_type, Sequence):
subtypes = typing.get_args(type_)
return {
ir.SequenceType(t)
for t in _get_allowed_types_from_type_annotation(subtypes[0])
}
# Allow everything by default
return _ALL_VALUE_TYPES # type: ignore[return-value]
@dataclasses.dataclass
class OpSignature:
"""Schema for an operator.
Attributes:
domain: Domain of the operator. E.g. "".
name: Name of the operator. E.g. "Add".
overload: Overload name of the operator.
params: Input parameters. When the op is an ONNX function definition,
            the order follows the function signature. This means ONNX inputs and
            ONNX attributes can be interleaved in the list.
outputs: Output parameters.
"""
domain: str
name: str
overload: str
params: Sequence[Parameter | AttributeParameter]
outputs: Sequence[Parameter]
params_map: Mapping[str, Parameter | AttributeParameter] = dataclasses.field(
init=False, repr=False
)
opset_version: int | None = None
def __post_init__(self):
self.params_map = {param.name: param for param in self.params}
def get(self, name: str) -> Parameter | AttributeParameter:
return self.params_map[name]
def __contains__(self, name: str) -> bool:
return name in self.params_map
def __iter__(self) -> Iterator[Parameter | AttributeParameter]:
return iter(self.params)
def __str__(self) -> str:
domain = self.domain or "''"
# TODO: Double check the separator for overload
overload = f"::{self.overload}" if self.overload else ""
params = ", ".join(str(param) for param in self.params)
outputs = ", ".join(str(param.type_constraint.name) for param in self.outputs)
type_constraints = {}
for param in self.params:
if isinstance(param, Parameter):
type_constraints[param.type_constraint.name] = param.type_constraint
for param in self.outputs:
type_constraints[param.type_constraint.name] = param.type_constraint
type_constraints_str = ", ".join(
str(type_constraint) for type_constraint in type_constraints.values()
)
return f"{domain}::{self.name}{overload}({params}) -> ({outputs}) where {type_constraints_str}"
@classmethod
def from_opschema(cls, opschema: onnx.defs.OpSchema) -> OpSignature:
"""Produce an OpSignature from an ONNX Opschema."""
type_constraints = {
constraint.type_param_str: TypeConstraintParam(
name=constraint.type_param_str,
allowed_types={
_get_type_from_str(type_str)
for type_str in constraint.allowed_type_strs
},
description=constraint.description,
)
for constraint in opschema.type_constraints
}
params = [
_convert_formal_parameter(param, type_constraints)
for param in opschema.inputs
]
for param in opschema.attributes.values():
default_attr = (
ir.serde.deserialize_attribute(param.default_value)
if param.default_value is not None
else None
)
if default_attr is not None:
# Set the name of the default attribute because it may have a different name from the parameter
default_attr.name = param.name
params.append(
AttributeParameter(
name=param.name,
type=ir.AttributeType(param.type), # type: ignore[arg-type]
required=param.required,
default=default_attr, # type: ignore[arg-type]
)
)
outputs = [
_convert_formal_parameter(param, type_constraints)
for param in opschema.outputs
]
return cls(
domain=opschema.domain,
name=opschema.name,
overload="",
params=params,
outputs=outputs,
opset_version=opschema.since_version,
)
@classmethod
def from_function(
cls,
func,
domain: str,
name: str | None = None,
overload: str = "",
*,
opset_version: int = 1,
) -> OpSignature:
"""Produce an OpSignature from a function using type annotation."""
py_signature = inspect.signature(func)
# Not using inspect.get_annotations because typing.get_type_hints seems to handle more cases
# https://github.com/python/cpython/issues/102405
type_hints = typing.get_type_hints(func)
params: list[Parameter | AttributeParameter] = []
# Create a mapping from type to a unique name
type_constraints: dict[str, TypeConstraintParam] = {}
for param in py_signature.parameters.values():
if param.name not in type_hints:
logger.warning(
"Missing annotation for parameter '%s' from %s. Treating as an Input.",
param.name,
py_signature,
)
type_constraint = TypeConstraintParam.any_value(f"T_{param.name}")
type_constraints[param.name] = type_constraint
params.append(
Parameter(
name=param.name,
type_constraint=type_constraint,
required=param.default is inspect.Parameter.empty,
# TODO: Handle variadic
variadic=False,
default=param.default
if param.default is not inspect.Parameter.empty
else _EMPTY_DEFAULT,
)
)
else:
type_ = type_hints[param.name]
if (attr_type := _get_attr_type(type_)) != ir.AttributeType.UNDEFINED:
# Construct the default attribute
if param.default is not inspect.Parameter.empty:
# TODO: Use ir_convenience instead to handle int as float
default = ir.Attr(param.name, attr_type, param.default)
else:
default = None
params.append(
AttributeParameter(
name=param.name,
type=attr_type,
required=param.default is inspect.Parameter.empty,
default=default,
)
)
else:
# Obtain the type constraint from the type annotation
# 1. Get a type constraint name from the type annotation
# If the type annotation is a TypeVar or Optional[TypeVar], get its name
# Otherwise, name it T_{param.name}
type_constraint_name = _get_type_constraint_name(type_)
if type_constraint_name is None:
type_constraint_name = f"T_{param.name}"
# 2. If the type constraint param is already initialized, use it
if type_constraint_name in type_constraints:
type_constraint = type_constraints[type_constraint_name]
else:
# 3. Otherwise, create a new TypeConstraintParam
type_constraint = TypeConstraintParam(
name=type_constraint_name,
allowed_types=_get_allowed_types_from_type_annotation(
type_
),
)
type_constraints[type_constraint_name] = type_constraint
# 4. Create Parameter
params.append(
Parameter(
name=param.name,
type_constraint=type_constraint,
required=param.default is inspect.Parameter.empty,
# TODO: Handle variadic
variadic=False,
default=param.default
if param.default is not inspect.Parameter.empty
else _EMPTY_DEFAULT,
)
)
return_type = type_hints.get("return")
outputs = []
if return_type is None:
# No returns
pass
else:
if typing.get_origin(return_type) is tuple:
# Multiple returns
return_types = typing.get_args(return_type)
else:
return_types = [return_type] # type: ignore[assignment]
for i, return_type_i in enumerate(return_types):
if (
return_param_name := _get_type_constraint_name(return_type_i)
) in type_constraints:
type_constraint = type_constraints[return_param_name]
else:
return_param_name = f"TReturn{i}"
type_constraint = TypeConstraintParam(
name=return_param_name,
allowed_types=_get_allowed_types_from_type_annotation(
return_type_i
),
)
type_constraints[return_param_name] = type_constraint
outputs.append(
Parameter(
name=return_param_name,
type_constraint=type_constraint,
required=True,
variadic=False,
default=_EMPTY_DEFAULT,
)
)
return cls(
domain=domain,
name=name or func.__name__,
overload=overload,
params=params,
outputs=outputs,
opset_version=opset_version,
)
```
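A small sketch of what `OpSignature.from_function` derives from type annotations. The file header for this module sits above the excerpt shown here, so the import path below is inferred from the neighboring `torch.onnx._internal.exporter` files and should be treated as an assumption.

```py
# Sketch: deriving an OpSignature from Python type annotations.
from typing import TypeVar

from torch.onnx._internal.exporter import _schemas  # inferred module path

TFloat = TypeVar("TFloat")  # an unconstrained TypeVar allows all value types

def my_op(x: TFloat, axis: int = 0) -> TFloat:
    raise NotImplementedError

sig = _schemas.OpSignature.from_function(my_op, domain="custom", name="MyOp")
# `x` becomes a Parameter whose constraint is named after the TypeVar ("TFloat");
# `axis` becomes an INT AttributeParameter with default 0; the return value
# reuses the existing "TFloat" constraint.
print(sig)
```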
|
=================================================================================================================================
SOURCE CODE FILE: _tensors.py
LINES: 1
SIZE: 2.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_tensors.py
ENCODING: utf-8
```py
"""Subclass of ir.Value that supports Python operators."""
# mypy: allow-untyped-defs
from __future__ import annotations
import onnxscript
from onnxscript import ir
class SymbolicTensor(ir.Value):
"""A subclass of ir.Value that supports Python operators."""
def __init__(
self,
opset: onnxscript.values.Opset,
name: str | None = None,
shape: ir.Shape | None = None,
type: ir.TypeProtocol | None = None,
doc_string: str | None = None,
const_value: ir.TensorProtocol | None = None,
):
super().__init__(
name=name,
shape=shape,
type=type,
doc_string=doc_string,
const_value=const_value,
)
self._opset = opset
@property
def rank(self) -> int | None:
if self.shape is None:
return None
return len(self.shape)
# TODO: Implement indexing
def __mod__(self, other):
if self.dtype in {
ir.DataType.FLOAT,
ir.DataType.DOUBLE,
ir.DataType.FLOAT16,
ir.DataType.BFLOAT16,
}:
return self._opset.Mod(self, other, fmod=1)
return self._opset.Mod(self, other)
def __ne__(self, other):
return self._opset.Not(self._opset.Equal(self, other))
def __neg__(self):
return self._opset.Neg(self)
def __add__(self, other):
return self._opset.Add(self, other)
def __radd__(self, other):
return self._opset.Add(other, self)
def __rand__(self, other):
return self._opset.And(other, self)
def __mul__(self, other):
return self._opset.Mul(self, other)
def __rmul__(self, other):
return self._opset.Mul(other, self)
def __matmul__(self, other):
return self._opset.MatMul(self, other)
def __pow__(self, other):
return self._opset.Pow(self, other)
def __sub__(self, other):
return self._opset.Sub(self, other)
def __rsub__(self, other):
return self._opset.Sub(other, self)
def __truediv__(self, other):
return self._opset.Div(self, other)
def __lt__(self, other):
return self._opset.Less(self, other)
def __le__(self, other):
return self._opset.LessOrEqual(self, other)
def __ge__(self, other):
return self._opset.GreaterOrEqual(self, other)
def __gt__(self, other):
return self._opset.Greater(self, other)
```
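The operator overloads above all follow one pattern: delegate to the opset stored on the tensor. A minimal, self-contained sketch of that dispatch pattern, using a hypothetical recording stand-in instead of a real `onnxscript` opset:

```py
# Hypothetical sketch: a stand-in "opset" that records calls, mimicking how
# SymbolicTensor.__add__ etc. delegate to self._opset.
class RecordingOpset:
    def __init__(self):
        self.calls = []

    def __getattr__(self, op_type):
        def op(*args, **kwargs):
            self.calls.append((op_type, args, kwargs))
            return f"{op_type}_out"
        return op

opset = RecordingOpset()
# In the real class, `x + y` calls opset.Add(x, y), and `x % y` on a
# floating-point tensor calls opset.Mod(x, y, fmod=1).
opset.Add("x", "y")
opset.Mod("x", "y", fmod=1)
print(opset.calls)  # [('Add', ('x', 'y'), {}), ('Mod', ('x', 'y'), {'fmod': 1})]
```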
|
=================================================================================================================================
SOURCE CODE FILE: _testing.py
LINES: 1
SIZE: 3.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_testing.py
ENCODING: utf-8
```py
"""Test utilities for ONNX export."""
from __future__ import annotations
__all__ = ["assert_onnx_program"]
from typing import Any, TYPE_CHECKING
import torch
from torch.utils import _pytree
if TYPE_CHECKING:
from torch.onnx._internal.exporter import _onnx_program
def assert_onnx_program(
program: _onnx_program.ONNXProgram,
*,
rtol: float | None = None,
atol: float | None = None,
args: tuple[Any, ...] | None = None,
kwargs: dict[str, Any] | None = None,
strategy: str | None = "TorchExportNonStrictStrategy",
) -> None:
"""Assert that the ONNX model produces the same output as the PyTorch ExportedProgram.
Args:
program: The ``ONNXProgram`` to verify.
rtol: Relative tolerance.
atol: Absolute tolerance.
args: The positional arguments to pass to the program.
If None, the default example inputs in the ExportedProgram will be used.
kwargs: The keyword arguments to pass to the program.
If None, the default example inputs in the ExportedProgram will be used.
strategy: Assert the capture strategy used to export the program. Values can be
            class names such as "TorchExportStrategy" or "TorchExportNonStrictStrategy".
If None, the strategy is not asserted.
"""
if strategy is not None:
if program._capture_strategy != strategy:
raise ValueError(
f"Expected strategy '{strategy}' is used to capture the exported program, "
f"but got '{program._capture_strategy}'."
)
exported_program = program.exported_program
if exported_program is None:
raise ValueError(
"The ONNXProgram does not contain an ExportedProgram. "
"To verify the ONNX program, initialize ONNXProgram with an ExportedProgram, "
"or assign the ExportedProgram to the ONNXProgram.exported_program attribute."
)
if args is None and kwargs is None:
# User did not provide example inputs, use the default example inputs
if exported_program.example_inputs is None:
raise ValueError(
"No example inputs provided and the exported_program does not contain example inputs. "
"Please provide arguments to verify the ONNX program."
)
args, kwargs = exported_program.example_inputs
if args is None:
args = ()
if kwargs is None:
kwargs = {}
torch_module = exported_program.module()
torch_outputs, _ = _pytree.tree_flatten(torch_module(*args, **kwargs))
# ONNX outputs are always real, so we need to convert torch complex outputs to real representations
torch_outputs = [
torch.view_as_real(output) if torch.is_complex(output) else output
for output in torch_outputs
]
onnx_outputs = program(*args, **kwargs)
# TODO(justinchuby): Include output names in the error message
torch.testing.assert_close(
tuple(onnx_outputs),
tuple(torch_outputs),
rtol=rtol,
atol=atol,
equal_nan=True,
check_device=False,
)
```
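A hedged usage sketch, assuming a recent PyTorch where `torch.onnx.export(..., dynamo=True)` returns an `ONNXProgram` with the example inputs attached:

```py
# Usage sketch for assert_onnx_program (assumes the dynamo-based export path).
import torch
from torch.onnx._internal.exporter._testing import assert_onnx_program

class MulAdd(torch.nn.Module):
    def forward(self, x, y):
        return x * y + 1.0

args = (torch.randn(2, 3), torch.randn(2, 3))
program = torch.onnx.export(MulAdd(), args, dynamo=True)
# Compares ONNX outputs against the ExportedProgram using the example inputs.
assert_onnx_program(program)
```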
|
===========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_torchlib\__init__.py
ENCODING: utf-8
```py
```
|
=================================================================================================================================================
SOURCE CODE FILE: _tensor_typing.py
LINES: 1
SIZE: 2.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_torchlib\_tensor_typing.py
ENCODING: utf-8
```py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
"""Typings for function definitions."""
from __future__ import annotations
from typing import TypeVar, Union
from onnxscript import (
BFLOAT16,
BOOL,
COMPLEX128,
COMPLEX64,
DOUBLE,
FLOAT,
FLOAT16,
INT16,
INT32,
INT64,
INT8,
STRING,
UINT8,
)
# NOTE: We do not care about unsigned types beyond UINT8 because PyTorch does not use them.
# More details can be found at: https://pytorch.org/docs/stable/tensors.html
_TensorType = Union[
BFLOAT16,
BOOL,
COMPLEX64,
COMPLEX128,
DOUBLE,
FLOAT,
FLOAT16,
INT8,
INT16,
INT32,
INT64,
UINT8,
]
_FloatType = Union[FLOAT16, FLOAT, DOUBLE, BFLOAT16]
IntType = Union[INT8, INT16, INT32, INT64]
RealType = Union[
BFLOAT16,
FLOAT16,
FLOAT,
DOUBLE,
INT8,
INT16,
INT32,
INT64,
]
TTensor = TypeVar("TTensor", bound=_TensorType)
# Duplicate TTensor for inputs/outputs that accept the same set of types as TTensor
# but do not constrain the type to be the same as the other inputs/outputs
TTensor2 = TypeVar("TTensor2", bound=_TensorType)
TTensorOrString = TypeVar("TTensorOrString", bound=Union[_TensorType, STRING])
TFloat = TypeVar("TFloat", bound=_FloatType)
TFloatOrUInt8 = TypeVar(
"TFloatOrUInt8", bound=Union[FLOAT, FLOAT16, DOUBLE, INT8, UINT8]
)
TInt = TypeVar("TInt", bound=IntType)
TReal = TypeVar("TReal", bound=RealType)
TRealUnlessInt16OrInt8 = TypeVar(
"TRealUnlessInt16OrInt8",
bound=Union[FLOAT16, FLOAT, DOUBLE, BFLOAT16, INT32, INT64],
)
TRealUnlessFloat16OrInt8 = TypeVar(
"TRealUnlessFloat16OrInt8", bound=Union[DOUBLE, FLOAT, INT16, INT32, INT64]
)
TRealOrUInt8 = TypeVar("TRealOrUInt8", bound=Union[RealType, UINT8])
TFloatHighPrecision = TypeVar("TFloatHighPrecision", bound=Union[FLOAT, DOUBLE])
```
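As a small illustration of why these are TypeVars rather than plain unions: a TypeVar shared across an annotation expresses "same type constraint", which the schema builder earlier in this document turns into a single named ONNX type constraint. A sketch using real `onnxscript` types:

```py
# Illustration: a shared TypeVar means "these inputs/outputs have the same type".
from typing import TypeVar, Union, get_type_hints

from onnxscript import DOUBLE, FLOAT, FLOAT16

TFloat = TypeVar("TFloat", bound=Union[FLOAT, FLOAT16, DOUBLE])

def scale(x: TFloat, factor: TFloat) -> TFloat: ...

hints = get_type_hints(scale)
# All three annotations resolve to the same TypeVar object, so the schema
# builder emits a single named constraint "TFloat" shared by x, factor,
# and the return value.
assert hints["x"] is hints["factor"] is hints["return"]
```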
|
=====================================================================================================================================================
SOURCE CODE FILE: _torchlib_registry.py
LINES: 1
SIZE: 2.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_torchlib\_torchlib_registry.py
ENCODING: utf-8
```py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Registry for aten functions."""
from __future__ import annotations
__all__ = ["onnx_impl", "get_torchlib_ops"]
import logging
from collections.abc import Sequence
from typing import Any, Callable, TypeVar
import onnxscript
import torch
from torch.onnx._internal.exporter import _constants, _registration
_T = TypeVar("_T", bound=Callable)
logger = logging.getLogger(__name__)
_registry: list[_registration.OnnxDecompMeta] = []
def onnx_impl(
target: _registration.TorchOp | tuple[_registration.TorchOp, ...],
*,
trace_only: bool = False,
complex: bool = False,
no_compile: bool = False,
private: bool = False,
) -> Callable[[_T], _T]:
"""Register an ONNX implementation of a torch op."""
if isinstance(target, torch._ops.OpOverloadPacket):
raise TypeError(
f"Target '{target}' should be provided as an OpOverload instead of an "
"OpOverloadPacket. You can get the default overload with "
"<op>.default"
)
def wrapper(
func: _T,
) -> _T:
processed_func: Any
if no_compile:
processed_func = func
else:
torchlib_opset = onnxscript.values.Opset(
domain=_constants.TORCHLIB_DOMAIN, version=1
)
if not trace_only:
# Compile the function
processed_func = onnxscript.script(opset=torchlib_opset)(func)
else:
processed_func = onnxscript.TracedOnnxFunction(torchlib_opset, func)
if not private:
# TODO(justinchuby): Simplify the logic and remove the private attribute
# Skip registration if private
if not isinstance(target, Sequence):
targets = (target,)
else:
targets = target # type: ignore[assignment]
for t in targets:
_registry.append(
_registration.OnnxDecompMeta(
onnx_function=processed_func,
fx_target=t,
signature=None,
is_complex=complex,
skip_signature_inference=no_compile,
)
)
return processed_func # type: ignore[return-value]
return wrapper
def get_torchlib_ops() -> tuple[_registration.OnnxDecompMeta, ...]:
# Trigger op registration
from torch.onnx._internal.exporter._torchlib import ops
del ops
assert len(_registry) != 0
return tuple(_registry)
```
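A hedged sketch of registering an implementation through `onnx_impl`; `op.Relu` is a standard opset-18 operator, and the decorator usage mirrors the `core.py` examples further below:

```py
# Sketch: registering an ONNX implementation for aten.relu.default.
import torch
from onnxscript.onnx_opset import opset18 as op
from torch.onnx._internal.exporter._torchlib._torchlib_registry import (
    get_torchlib_ops,
    onnx_impl,
)

@onnx_impl(torch.ops.aten.relu.default, trace_only=True)
def aten_relu(self):
    """relu(Tensor self) -> Tensor"""
    return op.Relu(self)

# The decorated function is appended to the module-level registry and is
# returned (along with the built-in ops) by get_torchlib_ops().
print(len(get_torchlib_ops()))
```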
|
===============================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_torchlib\ops\__init__.py
ENCODING: utf-8
```py
from __future__ import annotations
__all__ = ["core", "hop"]
from torch.onnx._internal.exporter._torchlib.ops import core, hop
```
|
===========================================================================================================================================
SOURCE CODE FILE: core.py
LINES: 1
SIZE: 1.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_torchlib\ops\core.py
ENCODING: utf-8
```py
"""torch.ops.aten operators under the `core` module."""
# mypy: disable-error-code="misc,arg-type,type-arg,valid-type,assignment,return-value,type-var,operator,no-untyped-def,index"
# ruff: noqa: TCH001,TCH002
# flake8: noqa
from __future__ import annotations
import operator
from onnxscript.onnx_opset import opset18 as op
import torch
from torch.onnx._internal.exporter._torchlib._tensor_typing import TReal, TRealOrUInt8
from torch.onnx._internal.exporter._torchlib._torchlib_registry import onnx_impl
aten = torch.ops.aten
@onnx_impl((aten.abs.default, operator.abs), trace_only=True)
def aten_abs(self: TRealOrUInt8) -> TRealOrUInt8:
"""abs(Tensor self) -> Tensor"""
return op.Abs(self)
@onnx_impl(aten.abs.default, complex=True, trace_only=True)
def aten_abs_complex(self: TRealOrUInt8) -> TRealOrUInt8:
"""abs(Tensor self) -> Tensor"""
return op.ReduceL2(self, [-1], keepdims=False)
@onnx_impl((aten.add.Tensor, aten.add.Scalar, operator.add), trace_only=True)
def aten_add(self: TReal, other: TReal, alpha: float = 1.0) -> TReal:
"""add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"""
if alpha != 1.0:
alpha = op.CastLike(alpha, other)
other = op.Mul(other, alpha)
return op.Add(self, other)
@onnx_impl((aten.add.Tensor, aten.add.Scalar), trace_only=True, complex=True)
def aten_add_complex(self: TReal, other: TReal, alpha: float = 1.0) -> TReal:
"""add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"""
return aten_add(self, other, alpha=alpha)
```
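The `alpha` handling in `aten_add` mirrors PyTorch's eager semantics, `add(self, other, alpha) == self + alpha * other`, which is easy to check directly:

```py
# Eager-mode check of the alpha semantics that aten_add reproduces.
import torch

a, b = torch.tensor([1.0, 2.0]), torch.tensor([10.0, 20.0])
out = torch.ops.aten.add.Tensor(a, b, alpha=2.0)
assert torch.equal(out, a + 2.0 * b)
print(out)  # tensor([21., 42.])
```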
|
==========================================================================================================================================
SOURCE CODE FILE: hop.py
LINES: 1
SIZE: 2.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_torchlib\ops\hop.py
ENCODING: utf-8
```py
"""Implementation for higher-order operators."""
from __future__ import annotations
from typing import TYPE_CHECKING
import torch
from torch.onnx._internal._lazy_import import onnxscript_ir as ir
from torch.onnx._internal.exporter import _core
from torch.onnx._internal.exporter._torchlib._torchlib_registry import onnx_impl
if TYPE_CHECKING:
from collections.abc import Sequence
def call_op(
op_type: str,
*args: ir.Value,
_num_outputs: int = 1,
_domain: str = "",
**kwargs: int | float | str | bool | ir.Graph | ir.TensorProtocol,
) -> Sequence[ir.Value]:
"""Call an operator with the given arguments and keyword arguments.
Arguments are always inputs, while keyword arguments are attributes.
"""
# This is a wrapper around the IR node creation that hooks into the _builder.OpRecorder
# tracer so that all nodes created are recorded the same way as if we were to use
# onnxscript ops directly.
from onnxscript.ir import convenience as ir_convenience
assert _core.current_tracer is not None
tracer = _core.current_tracer
inputs = list(args)
# If final inputs are None, strip them from the node inputs
for input in reversed(inputs):
if input is not None:
break
inputs.pop()
# Construct and filter out None attributes
attributes = [
attr
for attr in ir_convenience.convert_attributes(kwargs)
if attr.value is not None # type: ignore[union-attr]
]
tracer.nodes.append(
node := ir.Node(
_domain,
op_type,
inputs=inputs,
attributes=attributes,
num_outputs=_num_outputs,
version=tracer.opset.version,
)
)
return node.outputs
@onnx_impl(torch.ops.higher_order.cond, no_compile=True)
def higher_order_cond(
cond: ir.Value,
true_func: ir.Function,
false_func: ir.Function,
inputs: Sequence[ir.Value],
) -> Sequence[ir.Value]:
then_node = ir.Node(
true_func.domain, true_func.name, inputs, num_outputs=len(true_func.outputs)
)
else_node = ir.Node(
false_func.domain, false_func.name, inputs, num_outputs=len(false_func.outputs)
)
# ONNX Runtime complains about duplicate output names if we don't rename them.
    # However, leaving the names unchanged does not appear to be an actual violation of SSA form.
for func_out, out in zip(true_func.outputs, then_node.outputs):
out.name = f"{func_out.name}_{true_func.name}"
for func_out, out in zip(false_func.outputs, else_node.outputs):
out.name = f"{func_out.name}_{false_func.name}"
return call_op(
"If",
cond,
_num_outputs=len(true_func.outputs),
then_branch=ir.Graph(
(), then_node.outputs, nodes=[then_node], name=true_func.name
),
else_branch=ir.Graph(
(), else_node.outputs, nodes=[else_node], name=false_func.name
),
)
```
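`higher_order_cond` is the export path for `torch.cond`. For reference, a minimal eager-mode example of the operator being exported:

```py
# Minimal example of the higher-order op that higher_order_cond exports.
import torch

def true_fn(x):
    return x.sin()

def false_fn(x):
    return x.cos()

x = torch.randn(3)
# torch.cond selects a branch based on a boolean predicate tensor; on export,
# the two branches become the then_branch/else_branch subgraphs of an ONNX If.
out = torch.cond(x.sum() > 0, true_fn, false_fn, (x,))
print(out)
```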
|
======================================================================================================================================
SOURCE CODE FILE: _verification.py
LINES: 1
SIZE: 11.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\exporter\_verification.py
ENCODING: utf-8
```py
from __future__ import annotations
__all__ = [
"VerificationInfo",
"verify_onnx_program",
]
import dataclasses
import logging
import math
from typing import Any, TYPE_CHECKING
import torch
from torch.utils import _pytree
if TYPE_CHECKING:
from onnxscript import ir
from torch.onnx._internal.exporter import _onnx_program
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class VerificationInfo:
"""Verification information for a value in the ONNX program.
This class contains the maximum absolute difference, maximum relative difference,
and histograms of absolute and relative differences between the expected and actual
values. It also includes the expected and actual data types.
The histograms are represented as tuples of tensors, where the first tensor is the
histogram counts and the second tensor is the bin edges.
Attributes:
name: The name of the value (output or intermediate).
max_abs_diff: The maximum absolute difference between the expected and actual values.
max_rel_diff: The maximum relative difference between the expected and actual values.
abs_diff_hist: A tuple of tensors representing the histogram of absolute differences.
The first tensor is the histogram counts and the second tensor is the bin edges.
rel_diff_hist: A tuple of tensors representing the histogram of relative differences.
The first tensor is the histogram counts and the second tensor is the bin edges.
expected_dtype: The data type of the expected value.
actual_dtype: The data type of the actual value.
"""
name: str
max_abs_diff: float
max_rel_diff: float
abs_diff_hist: tuple[torch.Tensor, torch.Tensor]
rel_diff_hist: tuple[torch.Tensor, torch.Tensor]
expected_dtype: torch.dtype
actual_dtype: torch.dtype
# NOTE: We don't need to include shape because the expected shape is already known
# and checked by the runtime
@classmethod
def from_tensors(
cls,
name: str,
expected: torch.Tensor | float | int | bool,
actual: torch.Tensor | float | int | bool,
) -> VerificationInfo:
"""Create a VerificationInfo object from two tensors.
Args:
name: The name of the value.
expected: The expected tensor.
actual: The actual tensor.
Returns:
VerificationInfo: The VerificationInfo object.
"""
if not isinstance(expected, torch.Tensor):
expected = torch.tensor(expected)
if not isinstance(actual, torch.Tensor):
actual = torch.tensor(actual)
max_abs_diff, max_rel_diff, abs_diff, rel_diff = _compare_tensors(
expected, actual
)
bins = torch.tensor(
[0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0, 10, 1000000],
dtype=torch.float,
)
abs_diff_hist = torch.histogram(abs_diff.float(), bins=bins)
rel_diff_hist = torch.histogram(rel_diff.float(), bins=bins)
return cls(
name=name,
max_abs_diff=max_abs_diff,
max_rel_diff=max_rel_diff,
abs_diff_hist=abs_diff_hist,
rel_diff_hist=rel_diff_hist,
expected_dtype=expected.dtype,
actual_dtype=actual.dtype,
)
def _compare_tensors(
expected: torch.Tensor,
actual: torch.Tensor,
) -> tuple[float, float, torch.Tensor, torch.Tensor]:
# Move tensors to the same device
expected = expected.detach().cpu()
actual = actual.detach().cpu()
if expected.numel() == 0 or actual.numel() == 0:
return math.inf, math.inf, torch.tensor(math.inf), torch.tensor(math.inf)
if expected.dtype == torch.bool:
expected = expected.to(torch.float32)
actual = actual.to(torch.float32)
if torch.is_complex(expected):
expected = torch.view_as_real(expected)
abs_diff = torch.abs(expected - actual)
eps = 1e-7
normalizer = torch.abs(expected) + eps
rel_diff = abs_diff / normalizer
max_absolute_difference = abs_diff.max().item()
max_relative_difference = rel_diff.max().item()
return max_absolute_difference, max_relative_difference, abs_diff, rel_diff
def verify_onnx_program(
onnx_program: _onnx_program.ONNXProgram,
args: tuple[Any, ...] | None = None,
kwargs: dict[str, Any] | None = None,
compare_intermediates: bool = False,
) -> list[VerificationInfo]:
"""Verify the ONNX model by comparing the values with the expected values from ExportedProgram.
Args:
onnx_program: The ONNX program to verify.
args: The input arguments for the model.
kwargs: The keyword arguments for the model.
        compare_intermediates: Whether to verify intermediate values. This takes
            longer, so it is disabled by default.
Returns:
VerificationInfo objects containing the verification information for each value.
"""
exported_program = onnx_program.exported_program
if exported_program is None:
raise ValueError(
"The ONNX program does not contain an exported_program. "
"Please provide an exported_program to verify the ONNX program."
)
if args is None and kwargs is None:
# User did not provide example inputs, use the default example inputs
if exported_program.example_inputs is None:
raise ValueError(
"No example inputs provided and the exported_program does not contain example inputs. "
"Please provide arguments to verify the ONNX program."
)
args, kwargs = exported_program.example_inputs
if args is None:
args = ()
if kwargs is None:
kwargs = {}
# Flatten args for ONNX program and the VerificationInterpreter
flat_args, _ = exported_program._get_flat_args_with_check(args, kwargs)
if not compare_intermediates:
# Compare the output values
torch_outputs, _ = _pytree.tree_flatten(
exported_program.module()(*args, **kwargs)
)
onnx_outputs = onnx_program(*flat_args)
results = []
for torch_output, onnx_output, output_val in zip(
torch_outputs, onnx_outputs, onnx_program.model.graph.outputs
):
results.append(
VerificationInfo.from_tensors(
name=str(output_val.name),
expected=torch_output,
actual=onnx_output,
)
)
return results
# Use the _VerificationInterpreter to get the intermediate values
# By design the output values are included too
interpreter = _VerificationInterpreter(onnx_program)
interpreter.run(*flat_args)
return interpreter.verification_infos
def _create_value_mapping(graph: ir.Graph) -> dict[str, ir.Value]:
"""Return a dictionary mapping names to values in the graph.
The mapping does not include values from subgraphs.
Args:
graph: The graph to extract the mapping from.
Returns:
A dictionary mapping names to values.
"""
values = {}
values.update(graph.initializers)
# The names of the values can be None or "", which we need to exclude
for input in graph.inputs:
if not input.name:
continue
values[input.name] = input
for node in graph:
for value in node.outputs:
if not value.name:
continue
values[value.name] = value
return values
class _VerificationInterpreter(torch.fx.Interpreter):
"""Interpreter for verifying converted ONNX model accuracy by comparing intermediate values.
To compare models, first initialize the interpreter with an ONNX program.
Then, call the :meth:`run` method with the input arguments to execute the model.
The :meth:`run` method will execute the model and populate the
:attr:`verification_infos` attribute with the verification information for each value.
::
onnx_program = torch.onnx.export(model, args, dynamo=True)
interpreter = _VerificationInterpreter(onnx_program)
interpreter.run(*args)
verification_infos = interpreter.verification_infos
for info in verification_infos:
print("value name:", info.name, info)
The verification information includes the maximum absolute difference, maximum relative
difference, and histograms of absolute and relative differences between the expected
and actual values. See :class:`VerificationInfo` for more details.
Attributes:
verification_infos: A list of verification information for each value.
It is populated when the `run` method is called.
"""
def __init__(self, onnx_program: torch.onnx.ONNXProgram) -> None:
"""Initialize the _VerificationInterpreter with an ONNX program.
Args:
onnx_program: The ONNX program to verify.
"""
if onnx_program.exported_program is None:
raise ValueError(
"The ONNX program does not contain an exported_program. "
"Please provide an exported_program to verify the ONNX program."
)
super().__init__(onnx_program.exported_program.module())
self._onnx_program = onnx_program
self._onnx_values = _create_value_mapping(onnx_program.model.graph)
self._args: tuple[Any, ...] = ()
self.verification_infos: list[VerificationInfo] = []
def run(
self,
*args: Any,
initial_env: dict[torch.fx.Node, Any] | None = None,
enable_io_processing: bool = True,
) -> Any:
"""Run the interpreter with the given input arguments.
This method executes the model and populates the :attr:`verification_infos` attribute
with the verification information for each value.
Args:
args: The input arguments for the model.
initial_env: The initial environment for the interpreter.
enable_io_processing: Whether to enable IO processing.
Returns:
Any: The result of executing the model.
"""
self.verification_infos = []
self._args = args
return super().run(
*args,
initial_env=initial_env,
enable_io_processing=enable_io_processing,
)
def run_node(self, n: torch.fx.Node) -> Any:
result = super().run_node(n)
if n.op != "call_function":
return result
node_name = n.name
if node_name not in self._onnx_values:
return result
try:
(onnx_result,) = self._onnx_program.compute_values([node_name], self._args)
except Exception as e:
logger.warning(
"Failed to compute value for node %s: %s",
node_name,
e,
)
return result
info = VerificationInfo.from_tensors(
name=node_name,
expected=result,
actual=onnx_result,
)
self.verification_infos.append(info)
if info.max_abs_diff > 0.01 or info.max_rel_diff > 0.1:
logger.warning(
"Verification info for node %s: max_abs_diff: %s, max_rel_diff: %s",
node_name,
info.max_abs_diff,
info.max_rel_diff,
)
else:
logger.info(
"Verification info for node %s: max_abs_diff: %s, max_rel_diff: %s",
node_name,
info.max_abs_diff,
info.max_rel_diff,
)
return result
```
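A hedged usage sketch, again assuming the dynamo-based `torch.onnx.export` API that returns an `ONNXProgram`:

```py
# Sketch: verifying an exported program output-by-output.
import torch
from torch.onnx._internal.exporter._verification import verify_onnx_program

class Model(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x) * 2.0

args = (torch.randn(4, 4),)
program = torch.onnx.export(Model(), args, dynamo=True)
# Each VerificationInfo describes one graph output (or intermediate value
# when compare_intermediates=True).
for info in verify_onnx_program(program, args):
    print(info.name, info.max_abs_diff, info.max_rel_diff)
```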
|
===========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\__init__.py
ENCODING: utf-8
```py
from .patcher import ONNXTorchPatcher
from .serialization import save_model_with_external_data
__all__ = [
"save_model_with_external_data",
"ONNXTorchPatcher",
]
```
|
========================================================================================================================
SOURCE CODE FILE: _pass.py
LINES: 5
SIZE: 12.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\_pass.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import abc
import contextlib
import dataclasses
import difflib
import io
import logging
import sys
from typing import Any, Callable, TYPE_CHECKING
import torch
import torch.fx
from torch._subclasses.fake_tensor import unset_fake_temporarily
from torch.onnx._internal.fx import diagnostics, onnxfunction_dispatcher
if TYPE_CHECKING:
from torch._subclasses import fake_tensor
@dataclasses.dataclass
class PackageInfo:
package_name: str
version: str | None
commit_hash: str | None
def to_onnx_domain_string(self) -> str:
return ".".join(
filter(None, ("pkg", self.package_name, self.version, self.commit_hash))
)
@classmethod
def from_python_class(cls, python_class_name: type | str) -> PackageInfo:
if isinstance(python_class_name, type):
python_class_name = python_class_name.__module__
package_name = python_class_name.split(".")[0]
package = __import__(package_name)
version = getattr(package, "__version__", None)
# TODO: Figure out how to retrieve commit hash.
commit_hash = None
return cls(package_name, version, commit_hash)
@dataclasses.dataclass
class GraphModuleOnnxMeta:
package_info: PackageInfo
@contextlib.contextmanager
def _patch_difflib_sequence_matcher_init():
"""Context patching `difflib.SequenceMatcher` for fx readable graph.
Under this context, the `autojunk` argument of `difflib.SequenceMatcher` will always
be considered as `False`. This is to prevent `difflib.SequenceMatcher` recognizing
    stacktrace messages in the fx readable graph as junk, as these messages tend to be long (>200 characters)
and repeat multiple times, which falls under the junk filter criteria.
`difflib.SequenceMatcher` is used underneath by all sorts of diffing functions
in `difflib`, including `difflib.unified_diff`, `difflib.ndiff`, `difflib.context_diff`.
Unfortunately, there is no way to pass `autojunk` argument to these functions, and
they all default to `True`. This context patching will affect all of them.
`Reference: Automatic junk heuristic <https://docs.python.org/3/library/difflib.html>`_
"""
original_init = difflib.SequenceMatcher.__init__
def patched_init(self, isjunk=None, a="", b="", autojunk=True):
original_init(self, isjunk, a, b, autojunk=False)
difflib.SequenceMatcher.__init__ = patched_init # type: ignore[assignment]
try:
yield
finally:
difflib.SequenceMatcher.__init__ = original_init # type: ignore[assignment]
def _unified_diff(a: str, b: str) -> str:
"""Return a string containing the unified diff of two strings.
This function calls a patched version of `difflib.unified_diff` with `autojunk` set
to `False` for `difflib.SequenceMatcher` class. More details can be found in
`_patch_difflib_sequence_matcher_init` function.
Args:
a: The first string.
b: The second string.
Returns:
The unified diff of the two strings. If there is no diff, return "<no diff>".
Example::
>>> a = '''class GraphModule(torch.nn.Module):
... def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor):
... # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
... view = input_ids.view(-1, 3); input_ids = None
... '''
>>> b = '''class <lambda>(torch.nn.Module):
... def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]):
... # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
... view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None
... '''
>>> print(_unified_diff(a, b))
---
+++
@@ -1,4 +1,4 @@
-class GraphModule(torch.nn.Module):
- def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor):
+class <lambda>(torch.nn.Module):
+ def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]):
# File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
- view = input_ids.view(-1, 3); input_ids = None
+ view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None
"""
a_list = a.splitlines(keepends=True)
b_list = b.splitlines(keepends=True)
with _patch_difflib_sequence_matcher_init():
# Set `n` to `sys.maxsize` to show entire graph when there is a diff.
diff = "".join(difflib.unified_diff(a_list, b_list, n=sys.maxsize))
if not diff:
return "<no diff>"
return diff
def _transform_diagnose_call_message_formatter(
run: Callable,
self: Transform,
*args: Any,
**kwargs: Any,
) -> str:
return f"Running {self.__class__.__name__} pass. "
def maybe_fx_graph_tabular(graph: torch.fx.Graph) -> str | None:
"""Return the Graph nodes in tabular format. Equivalent to stdout of `graph.print_tabular()`.
If `tabulate` is not installed, return `None`.
Args:
graph: The Graph to print.
Returns:
The Graph printed in a tabular format. None if `tabulate` is not installed.
"""
f = io.StringIO()
with contextlib.redirect_stdout(f):
try:
graph.print_tabular()
except ImportError:
return None
return f.getvalue()
class Transform(abc.ABC):
"""Base class for FX graph transformations to be used by FX-ONNX exporter.
Similar to `FX Interpreter <https://pytorch.org/docs/stable/fx.html#torch.fx.Interpreter>`_,
specializations of this class execute the FX graph Node-by-Node.
Methods in the `Transform` class can be overridden to customize the behavior of the model.
This pattern can be useful for many things, including writing code transformations as well as analysis passes.
The following methods can be overridden::
_run()
+-- run_node()
+-- placeholder()
+-- get_attr()
+-- call_function()
+-- call_method()
+-- call_module()
+-- output()
One important aspect to note is that if the transformation modifies the model input and/or output signature,
(e.g. additional inputs/outputs are added to the model), :class:`InputAdaptStep` and/or :class:`OutputAdaptStep`
are needed to reconcile :attr:`ONNXProgram.model_proto`.
That is, the model signature and the model representation must match.
As an additional feature, this class provides builtin support for transformation recording using the diagnostics.
    The granularity of overriding is up to the user, and it determines the granularity of
the diagnostics information. For example, if `_run()` is overridden, the
diagnostics information will only contain graph level transformation. Instead,
if `call_function()` is overridden, the diagnostics information will additionally
contain the node level information of `call_function()`.
TODO(bowbao): Add more overridable methods in call hierarchy
TODO(bowbao): Create an example once more overridable methods are added.
"""
diagnostic_context: diagnostics.DiagnosticContext
"""The diagnostic context for recording diagnostics."""
module: torch.fx.GraphModule
"""The module to be transformed."""
fake_mode: fake_tensor.FakeTensorMode | None
"""The existing fake mode detected from `self.module`."""
def __init__(
self,
diagnostic_context: diagnostics.DiagnosticContext,
module: torch.fx.GraphModule,
):
"""Initialize the transform.
Args:
diagnostic_context: The diagnostic context for recording diagnostics.
module: The module to be transformed.
"""
self.diagnostic_context = diagnostic_context
self.module = module
self.fake_mode = self._detect_fake_mode()
def _detect_fake_mode(self) -> fake_tensor.FakeTensorMode | None:
"""Detect fake mode from the graph.
Scan through all nodes in graph and their meta['val'] to detect fake mode.
"""
fake_tensors = [node.meta.get("val") for node in self.module.graph.nodes]
with unset_fake_temporarily():
return torch._dynamo.utils.detect_fake_mode(fake_tensors)
def _maybe_fakefy_args(
self, fake_mode: fake_tensor.FakeTensorMode | None, *args: Any
) -> tuple[Any, ...]:
if fake_mode is None:
return args
# NB: This should hit the cache if tensors were fakefied before.
# E.g., when the fx graph is produced by Dynamo.
return tuple(
fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args
)
@abc.abstractmethod
def _run(self, *args, **kwargs) -> torch.fx.GraphModule: ...
@diagnostics.diagnose_call(
diagnostics.rules.fx_pass,
diagnostic_message_formatter=_transform_diagnose_call_message_formatter,
)
def run(self, *args, **kwargs) -> torch.fx.GraphModule:
"""Run the transform on `self.module`.
Note that this method may or may not mutate `self.module`, and the returned
`GraphModule` could be either `self.module` or a new `GraphModule`.
Args:
*args: Positional arguments for `self.module` to run.
**kwargs: Keyword arguments for `self.module` to run.
"""
diagnostic = self.diagnostic_context.inflight_diagnostic(
rule=diagnostics.rules.fx_pass
)
diagnostic.info(
"For detailed logging of graph modifications by this pass, either set "
"`DiagnosticOptions.verbosity_level` to `logging.DEBUG` or use the environment variable "
"`TORCH_LOGS='onnx_diagnostics'`."
)
# Gather graph information before transform.
graph_diff_log_level = logging.DEBUG
if diagnostic.logger.isEnabledFor(graph_diff_log_level):
# Cannot use LazyString because the graph may have been mutated at evaluation time.
old_readable_graph = self.module.print_readable(print_output=False)
old_tabular = maybe_fx_graph_tabular(self.module.graph)
else:
# Set to empty string to avoid unbound warning. This value should never be
# used since the log level is not enabled.
old_readable_graph = ""
old_tabular = ""
module = self._run(*args, **kwargs)
# Gather graph information after transform.
if diagnostic.logger.isEnabledFor(graph_diff_log_level):
new_readable_graph = module.print_readable(print_output=False)
new_tabular = maybe_fx_graph_tabular(module.graph)
with diagnostic.log_section(graph_diff_log_level, "Graph diff:"):
diagnostic.log(
graph_diff_log_level,
"```\n%s\n```",
diagnostics.LazyString(
_unified_diff, old_readable_graph, new_readable_graph
),
)
with diagnostic.log_section(graph_diff_log_level, "Tabular diff:"):
if old_tabular is None or new_tabular is None:
diagnostic.log(
graph_diff_log_level,
"Tabular diff is not available because `tabulate` is not installed.",
)
else:
diagnostic.log(
graph_diff_log_level,
"```\n%s\n```",
diagnostics.LazyString(_unified_diff, old_tabular, new_tabular),
)
return module
class AnalysisResult(abc.ABC): # noqa: B024
...
class Analysis(abc.ABC):
def __init__(
self,
diagnostic_context: diagnostics.DiagnosticContext,
module: torch.fx.GraphModule,
onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,
):
self.diagnostic_context = diagnostic_context
self.module = module
self.onnxfunction_dispatcher = onnxfunction_dispatcher
@abc.abstractmethod
def analyze(self, diagnostic_level: diagnostics.infra.Level) -> AnalysisResult: ...
```
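The diff helper can be exercised on its own, which also demonstrates the `n=sys.maxsize` choice of showing the whole graph; a short sketch (import path taken from the file header above):

```py
# Sketch: _unified_diff shows the entire text when anything changed.
from torch.onnx._internal.fx._pass import _unified_diff

before = "x = a.view(-1, 3)\ny = x + 1\n"
after = "x = torch.ops.aten.view.default(a, [-1, 3])\ny = x + 1\n"
print(_unified_diff(before, after))
print(_unified_diff(before, before))  # "<no diff>"
```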
|
====================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\analysis\__init__.py
ENCODING: utf-8
```py
from .unsupported_nodes import UnsupportedFxNodesAnalysis
__all__ = [
"UnsupportedFxNodesAnalysis",
]
```
|
=============================================================================================================================================
SOURCE CODE FILE: unsupported_nodes.py
LINES: 1
SIZE: 3.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\analysis\unsupported_nodes.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import dataclasses
from torch.onnx._internal.fx import _pass, diagnostics, registration
@dataclasses.dataclass
class UnsupportedFxNodesAnalysisResult(_pass.AnalysisResult):
unsupported_op_to_target_mapping: dict[str, dict[str, None]]
class UnsupportedFxNodesAnalysis(_pass.Analysis):
"""An analysis that detects unsupported FX nodes in the graph."""
def _lint(
self,
analysis_result: UnsupportedFxNodesAnalysisResult,
diagnostic_level: diagnostics.infra.Level,
):
"""Lint the graph and emit diagnostics if unsupported FX nodes are found."""
if not analysis_result.unsupported_op_to_target_mapping:
return
normalized_op_targets_map = {
op: list(targets.keys())
for op, targets in analysis_result.unsupported_op_to_target_mapping.items()
}
rule = diagnostics.rules.unsupported_fx_node_analysis
diagnostic = diagnostics.Diagnostic(
rule,
level=diagnostic_level,
message=rule.format_message(normalized_op_targets_map),
)
self.diagnostic_context.log_and_raise_if_error(diagnostic)
def analyze(
self, diagnostic_level: diagnostics.infra.Level
) -> UnsupportedFxNodesAnalysisResult:
"""Analyze the graph, emit diagnostics and return a result that contains unsupported FX nodes.
Args:
diagnostic_level: The diagnostic level to use when emitting diagnostics.
Returns:
An analysis result that contains unsupported FX nodes.
Raises:
RuntimeErrorWithDiagnostic: If diagnostics are emitted and the diagnostic
level is `ERROR`.
"""
op_to_target_mapping: dict[str, dict[str, None]] = {}
for node in self.module.graph.nodes:
if node.op == "call_function":
                # NOTE: The OpSchema matcher is outside the scope of this analysis.
internal_opname: registration.OpName = (
self.onnxfunction_dispatcher._get_aten_name(
node=node, diagnostic_context=self.diagnostic_context
)
)
overload_registration = (
self.onnxfunction_dispatcher.onnx_registry.is_registered_op(
namespace=internal_opname.namespace,
op_name=internal_opname.op_name,
overload=internal_opname.overload,
)
)
# NOTE: Fall back to default overload if the ONNX registry doesn't have the overload.
default_registration = (
self.onnxfunction_dispatcher.onnx_registry.is_registered_op(
namespace=internal_opname.namespace,
op_name=internal_opname.op_name,
overload=None,
)
)
if not overload_registration and not default_registration:
op_to_target_mapping.setdefault(node.op, {}).setdefault(
str(node.target), None
)
analysis_result = UnsupportedFxNodesAnalysisResult(op_to_target_mapping)
self._lint(analysis_result, diagnostic_level)
return analysis_result
```
|
=====================================================================================================================================
SOURCE CODE FILE: decomposition_skip.py
LINES: 1
SIZE: 8.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\decomposition_skip.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""A context manager that disables the decomposition of certain ops during dynamo tracing.
The approach is to temporarily hijack the operator callable with PT2 custom operator.
The custom operator will not be decomposed and will show up as a single node to be exported to ONNX.
For the time being the decomposition of these ops is otherwise unavoidable.
https://github.com/pytorch/pytorch/issues/116684
https://github.com/pytorch/pytorch/issues/115883
This solution will no longer be required once the issue is resolved.
"""
from __future__ import annotations
import abc
import contextlib
from typing import Callable, TYPE_CHECKING
from onnxscript.function_libs.torch_lib.ops import ( # type: ignore[import-not-found]
core as torchlib_core,
nn as torchlib_nn,
)
import torch
from torch._decomp import decompositions
if TYPE_CHECKING:
from collections.abc import Sequence
_NEW_OP_NAMESPACE: str = "onnx_export"
"""The namespace for the custom operator."""
class DecompSkip(abc.ABC):
op_callable: Callable
"""The original operator callable to skip decomposition."""
onnxscript_function: Callable
"""The ONNXScript function to be registered for exporting the custom operator."""
new_op_name: str
"""The name for the custom operator."""
new_op_schema: str
"""The schema for the custom operator. This should match with the signature of the original operator."""
@classmethod
@abc.abstractmethod
def register(cls, export_options: torch.onnx.ExportOptions):
"""Registers the custom operator and overrides the original operator.
It should do the following steps in order:
1. Register the custom operator.
2. Override the original operator with the replacement callable.
3. Register the ONNXScript function for exporting the custom operator.
"""
...
@classmethod
@abc.abstractmethod
def unregister(cls):
"""Restores the original operator callable."""
...
@classmethod
@abc.abstractmethod
def abstract(cls, *args, **kwargs):
"""An abstract impl (meta kernel) for the operator."""
...
@classmethod
def register_custom_op(cls):
"""Registers the custom operator."""
new_op_qualname = f"{_NEW_OP_NAMESPACE}::{cls.new_op_name}"
torch.library.define(new_op_qualname, cls.new_op_schema)
torch.library.impl(new_op_qualname, "default", cls.replacement)
torch.library.register_fake(new_op_qualname, cls.abstract)
@classmethod
def replacement(cls, *args, **kwargs):
"""A replacement callable for the operator to be hijacked.
This has the same signature and eager behavior as the original operator.
"""
return cls.op_callable(*args, **kwargs)
class UpsampleBilinear2DDecompSkip(DecompSkip):
op_callable = torch._C._nn.upsample_bilinear2d # type: ignore[attr-defined]
onnxscript_function = torchlib_nn.aten_upsample_bilinear2d_vec # type: ignore[attr-defined]
new_op_name = "upsample_bilinear2d"
new_op_schema = "(Tensor self, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)"
@classmethod
def register(cls, export_options: torch.onnx.ExportOptions):
if not hasattr(torch.ops, _NEW_OP_NAMESPACE) or not hasattr(
torch.ops.onnx_export, cls.new_op_name
):
cls.register_custom_op()
torch._C._nn.upsample_bilinear2d = torch.ops.onnx_export.upsample_bilinear2d # type: ignore[attr-defined]
if export_options.onnx_registry is None:
export_options.onnx_registry = torch.onnx.OnnxRegistry()
registry = export_options.onnx_registry
registry.register_op(
function=cls.onnxscript_function,
namespace=_NEW_OP_NAMESPACE,
op_name=cls.new_op_name,
)
@classmethod
def unregister(cls):
torch._C._nn.upsample_bilinear2d = cls.op_callable # type: ignore[attr-defined]
@classmethod
def abstract(cls, input, output_size, align_corners, scale_factors):
osize = decompositions.upsample_compute_output_size(
input.size(), output_size, scale_factors
)
return torch.empty(
(input.size(0), input.size(1), *osize),
dtype=input.dtype,
device=input.device,
)
class UpsampleTrilinear3DDecompSkip(DecompSkip):
op_callable = torch._C._nn.upsample_trilinear3d # type: ignore[attr-defined]
onnxscript_function = torchlib_nn.aten_upsample_trilinear3d_vec # type: ignore[attr-defined]
new_op_name = "upsample_trilinear3d"
new_op_schema = "(Tensor self, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)"
@classmethod
def register(cls, export_options: torch.onnx.ExportOptions):
if not hasattr(torch.ops, _NEW_OP_NAMESPACE) or not hasattr(
torch.ops.onnx_export, cls.new_op_name
):
cls.register_custom_op()
torch._C._nn.upsample_trilinear3d = torch.ops.onnx_export.upsample_trilinear3d # type: ignore[attr-defined]
if export_options.onnx_registry is None:
export_options.onnx_registry = torch.onnx.OnnxRegistry()
registry = export_options.onnx_registry
registry.register_op(
function=cls.onnxscript_function,
namespace=_NEW_OP_NAMESPACE,
op_name=cls.new_op_name,
)
@classmethod
def unregister(cls):
torch._C._nn.upsample_trilinear3d = cls.op_callable # type: ignore[attr-defined]
@classmethod
def abstract(cls, input, output_size, align_corners, scale_factors):
osize = decompositions.upsample_compute_output_size(
input.size(), output_size, scale_factors
)
return torch.empty(
(input.size(0), input.size(1), input.size(2), *osize),
dtype=input.dtype,
device=input.device,
)
class InstanceNormDecompSkip(DecompSkip):
op_callable = torch.instance_norm # type: ignore[attr-defined]
onnxscript_function = torchlib_core.aten_instance_norm # type: ignore[attr-defined]
new_op_name = "instance_norm"
new_op_schema = (
"(Tensor input, Tensor? weight, Tensor? bias, "
"Tensor? running_mean, Tensor? running_var, "
"bool use_input_stats, float momentum, float eps, "
"bool cudnn_enabled) -> Tensor"
)
@classmethod
def register(cls, export_options: torch.onnx.ExportOptions):
if not hasattr(torch.ops, _NEW_OP_NAMESPACE) or not hasattr(
torch.ops.onnx_export, cls.new_op_name
):
cls.register_custom_op()
torch.instance_norm = torch.ops.onnx_export.instance_norm # type: ignore[attr-defined]
if export_options.onnx_registry is None:
export_options.onnx_registry = torch.onnx.OnnxRegistry()
registry = export_options.onnx_registry
registry.register_op(
function=cls.onnxscript_function,
namespace=_NEW_OP_NAMESPACE,
op_name=cls.new_op_name,
)
@classmethod
def unregister(cls):
torch.instance_norm = cls.op_callable # type: ignore[attr-defined]
@classmethod
def abstract(
cls,
input,
weight,
bias,
running_mean,
running_var,
use_input_stats: bool,
momentum: float,
eps: float,
cudnn_enabled: bool,
):
return torch.empty(
input.size(),
dtype=input.dtype,
device=input.device,
)
_DEFAULT_SKIP_LIST = [
UpsampleBilinear2DDecompSkip,
InstanceNormDecompSkip,
UpsampleTrilinear3DDecompSkip,
]
@contextlib.contextmanager
def enable_decomposition_skips(
export_options: torch.onnx.ExportOptions,
skips: Sequence[type[DecompSkip]] = _DEFAULT_SKIP_LIST,
):
"""A context manager that enables the decomposition skips.
The original operator callables that are otherwise decomposed are replaced with custom operators.
The ONNXScript functions for exporting the custom operators are added to the ONNX registry inside export_options.
"""
try:
for skip in skips:
skip.register(export_options)
yield
finally:
for skip in skips:
skip.unregister()
```
|
======================================================================================================================================
SOURCE CODE FILE: decomposition_table.py
LINES: 1
SIZE: 5.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\decomposition_table.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Dispatcher for AtenLib functions from onnx-script."""
from __future__ import annotations
from typing import Callable
import torch
import torch._ops
import torch.fx
from torch.onnx._internal.fx import registration
def _create_onnx_supports_op_overload_table(
registry,
) -> set[torch._ops.OperatorBase | Callable]:
"""
Creates a set of OperatorBase and Callable objects that represent ONNX-supported PyTorch operations.
Args:
registry (OnnxRegistry): The ONNX registry for PyTorch.
Returns:
A collection of OperatorBase and Callable objects representing ONNX-supported PyTorch operations.
"""
table: set[torch._ops.OperatorBase | Callable] = set()
# Some ops in `torch.ops.aten` are not discoverable through `dir(torch.ops.aten)`,
# but retrievable via explicit lookup.
# https://github.com/pytorch/pytorch/issues/99681
# This is a workaround to make sure we register ONNX symbolic functions for these.
onnx_supported_aten_lookup_table = [
k.split("::")[1].split(".")[0]
for k in registry._all_registered_ops()
if k.startswith("aten::")
]
for op_namespace in (torch.ops.aten, torch.ops.prims):
attr_names = dir(op_namespace)
if op_namespace is torch.ops.aten:
attr_names += onnx_supported_aten_lookup_table
for attr_name in attr_names:
if not hasattr(op_namespace, attr_name):
# torchlib owns some attributes that are not aten ops.
continue
op_overload_packet = getattr(op_namespace, attr_name)
if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket):
continue
for overload_name in op_overload_packet.overloads():
op_overload = getattr(op_overload_packet, overload_name)
internal_op_name = registration.OpName.from_qualified_name(
qualified_name=op_overload.name()
)
                # NOTE: If the overload is supported in the registry, or its default
                # overload is supported in the registry, we add it to the table.
if registry.is_registered_op(
namespace=internal_op_name.namespace,
op_name=internal_op_name.op_name,
overload=internal_op_name.overload,
) or registry.is_registered_op(
namespace=internal_op_name.namespace,
op_name=internal_op_name.op_name,
overload=None,
):
# This line maps torch.ops.aten.add.Tensor, torch.ops.aten.add.Scalar, torch.ops.aten.add.out, etc
# to "aten::add". This means the exporter for "aten::add" is used for all overloads of "aten::add".
# This is applied to all ops under torch.ops.aten.
table.add(op_overload)
return table
def create_onnx_friendly_decomposition_table(
registry,
) -> dict[torch._ops.OperatorBase, Callable]:
"""
This function creates a dictionary of op overloads and their decomposition functions
for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function,
its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's
built-in aten-to-aten decomposition.
Args:
registry (torch.onnx.OnnxRegistry): The ONNX registry for PyTorch.
Returns:
Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding
decomposition functions.
"""
decomposition_table: dict[torch._ops.OperatorBase, Callable] = {}
    # Set of torch.ops.aten.* op overloads that already have ONNX symbolic functions
    # registered; e.g., torch.ops.aten.add.Tensor is included when "aten::add" is registered.
_ONNX_SUPPORT_OP_OVERLOADS = _create_onnx_supports_op_overload_table(registry)
# NOTE: If we import torch._decomp, we will get RuntimeError: Only a single
# TORCH_LIBRARY can be used to register the namespace nvprims; please put all of your
# definitions in a single TORCH_LIBRARY block.
for op_overload, decomp_fn in torch._decomp.decomposition_table.items():
# Skip decomposition into "prim::*" ops (defined in 'torch._refs'), because they
# are not generally supported by ONNX.
# Skip decomposition for op_overload as long as that op_overload has a corresponding ONNX
# symbolic function.
if (
"torch._refs" in decomp_fn.__module__
or op_overload in _ONNX_SUPPORT_OP_OVERLOADS
):
continue
decomposition_table[op_overload] = decomp_fn
    # NOTE: There are ops in core ATen and under torch._refs that are not
    # decomposed to prim::* ops. We need to pick them back up here.
for op_overload, decomp_fn in torch._decomp.core_aten_decompositions().items():
if op_overload in _ONNX_SUPPORT_OP_OVERLOADS:
continue
decomposition_table[op_overload] = decomp_fn
return decomposition_table
```
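A minimal sketch of how such a table is typically consumed, assuming the internal module path above stays importable: ops that already have ONNX symbolic functions are left intact, while everything else is decomposed during `make_fx` tracing.
```py
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch.onnx._internal.fx.decomposition_table import (
    create_onnx_friendly_decomposition_table,
)

registry = torch.onnx.OnnxRegistry()
decomp_table = create_onnx_friendly_decomposition_table(registry)

def f(x):
    # gelu is decomposed only if no ONNX symbolic function is registered for it.
    return torch.nn.functional.gelu(x)

gm = make_fx(f, decomposition_table=decomp_table)(torch.randn(2, 3))
print(gm.graph)
```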
|
==============================================================================================================================
SOURCE CODE FILE: diagnostics.py
LINES: 10
SIZE: 8.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\diagnostics.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import dataclasses
import functools
from typing import Any, TYPE_CHECKING
import onnxscript # type: ignore[import]
from onnxscript.function_libs.torch_lib import graph_building # type: ignore[import]
import torch
import torch.fx
from torch.onnx._internal import diagnostics
from torch.onnx._internal.diagnostics import infra
from torch.onnx._internal.diagnostics.infra import decorator, formatter
from torch.onnx._internal.fx import registration, type_utils as fx_type_utils
if TYPE_CHECKING:
import logging
# NOTE: The following limits are for the number of items to display in diagnostics for
# a list, tuple or dict. The limit is picked such that common useful scenarios such as
# operator arguments are covered, while preventing excessive processing load on very
# large containers such as the dictionary mapping from fx to onnx nodes.
_CONTAINER_ITEM_LIMIT: int = 10
# NOTE(bowbao): This is a shim over `torch.onnx._internal.diagnostics`, which is
# used in `torch.onnx`, and loaded with `torch`. Hence anything related to `onnxscript`
# cannot be put there.
# [NOTE: `dynamo_export` diagnostics logging]
# The 'dynamo_export' diagnostics leverage the PT2 artifact logger to handle the verbosity
# level of logs that are recorded in each SARIF log diagnostic. In addition to the SARIF log,
# terminal logging is disabled by default. Terminal logging can be activated by setting
# the environment variable `TORCH_LOGS="onnx_diagnostics"`. When the environment variable
# is set, it also fixes the logging level to `logging.DEBUG`, overriding the verbosity level
# specified in the diagnostic options.
# See `torch/_logging/__init__.py` for more on PT2 logging.
_ONNX_DIAGNOSTICS_ARTIFACT_LOGGER_NAME = "onnx_diagnostics"
diagnostic_logger = torch._logging.getArtifactLogger(
"torch.onnx", _ONNX_DIAGNOSTICS_ARTIFACT_LOGGER_NAME
)
def is_onnx_diagnostics_log_artifact_enabled() -> bool:
return torch._logging._internal.log_state.is_artifact_enabled(
_ONNX_DIAGNOSTICS_ARTIFACT_LOGGER_NAME
)
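# For example, terminal logging for these diagnostics can be activated from the
# shell before running an export script (see the NOTE above):
#
#   TORCH_LOGS="onnx_diagnostics" python export_script.py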
@functools.singledispatch
def _format_argument(obj: Any) -> str:
return formatter.format_argument(obj)
def format_argument(obj: Any) -> str:
formatter = _format_argument.dispatch(type(obj))
return formatter(obj)
# NOTE: EDITING BELOW? READ THIS FIRST!
#
# The below functions register the `format_argument` function for different types via
# `functools.singledispatch` registry. These are invoked by the diagnostics system
# when recording function arguments and return values as part of a diagnostic.
# Hence, heavy computation should be avoided here. Things to avoid, for example:
# `torch.fx.GraphModule.print_readable()`.
@_format_argument.register
def _torch_nn_module(obj: torch.nn.Module) -> str:
return f"torch.nn.Module({obj.__class__.__name__})"
@_format_argument.register
def _torch_fx_graph_module(obj: torch.fx.GraphModule) -> str:
return f"torch.fx.GraphModule({obj.__class__.__name__})"
@_format_argument.register
def _torch_fx_node(obj: torch.fx.Node) -> str:
node_string = f"fx.Node({obj.target})[{obj.op}]:"
if "val" not in obj.meta:
return node_string + "None"
return node_string + format_argument(obj.meta["val"])
@_format_argument.register
def _torch_fx_symbolic_bool(obj: torch.SymBool) -> str:
return f"SymBool({obj})"
@_format_argument.register
def _torch_fx_symbolic_int(obj: torch.SymInt) -> str:
return f"SymInt({obj})"
@_format_argument.register
def _torch_fx_symbolic_float(obj: torch.SymFloat) -> str:
return f"SymFloat({obj})"
@_format_argument.register
def _torch_tensor(obj: torch.Tensor) -> str:
return f"Tensor({fx_type_utils.from_torch_dtype_to_abbr(obj.dtype)}{_stringify_shape(obj.shape)})"
@_format_argument.register
def _int(obj: int) -> str:
return str(obj)
@_format_argument.register
def _float(obj: float) -> str:
return str(obj)
@_format_argument.register
def _bool(obj: bool) -> str:
return str(obj)
@_format_argument.register
def _str(obj: str) -> str:
return obj
@_format_argument.register
def _registration_onnx_function(obj: registration.ONNXFunction) -> str:
# TODO: Compact display of `param_schema`.
return f"registration.ONNXFunction({obj.op_full_name}, is_custom={obj.is_custom}, is_complex={obj.is_complex})"
@_format_argument.register
def _list(obj: list) -> str:
list_string = f"List[length={len(obj)}](\n"
if not obj:
return list_string + "None)"
for i, item in enumerate(obj):
if i >= _CONTAINER_ITEM_LIMIT:
# NOTE: Print only first _CONTAINER_ITEM_LIMIT items.
list_string += "...,\n"
break
list_string += f"{format_argument(item)},\n"
return list_string + ")"
@_format_argument.register
def _tuple(obj: tuple) -> str:
tuple_string = f"Tuple[length={len(obj)}](\n"
if not obj:
return tuple_string + "None)"
for i, item in enumerate(obj):
if i >= _CONTAINER_ITEM_LIMIT:
# NOTE: Print only first _CONTAINER_ITEM_LIMIT items.
tuple_string += "...,\n"
break
tuple_string += f"{format_argument(item)},\n"
return tuple_string + ")"
@_format_argument.register
def _dict(obj: dict) -> str:
dict_string = f"Dict[length={len(obj)}](\n"
if not obj:
return dict_string + "None)"
for i, (key, value) in enumerate(obj.items()):
if i >= _CONTAINER_ITEM_LIMIT:
# NOTE: Print only first _CONTAINER_ITEM_LIMIT items.
dict_string += "...\n"
break
dict_string += f"{key}: {format_argument(value)},\n"
return dict_string + ")"
@_format_argument.register
def _torch_nn_parameter(obj: torch.nn.Parameter) -> str:
return f"Parameter({format_argument(obj.data)})"
@_format_argument.register
def _onnxscript_torch_script_tensor(obj: graph_building.TorchScriptTensor) -> str:
return f"`TorchScriptTensor({fx_type_utils.from_torch_dtype_to_abbr(obj.dtype)}{_stringify_shape(obj.shape)})`" # type: ignore[arg-type] # noqa: B950
@_format_argument.register
def _onnxscript_onnx_function(obj: onnxscript.OnnxFunction) -> str:
return f"`OnnxFunction({obj.name})`"
@_format_argument.register
def _onnxscript_traced_onnx_function(obj: onnxscript.TracedOnnxFunction) -> str:
return f"`TracedOnnxFunction({obj.name})`"
# from torch/fx/graph.py to follow torch format
def _stringify_shape(shape: torch.Size | None) -> str:
if shape is None:
return ""
return f"[{', '.join(str(x) for x in shape)}]"
rules = diagnostics.rules
levels = diagnostics.levels
RuntimeErrorWithDiagnostic = infra.RuntimeErrorWithDiagnostic
LazyString = formatter.LazyString
DiagnosticOptions = infra.DiagnosticOptions
@dataclasses.dataclass
class Diagnostic(infra.Diagnostic):
logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger)
def log(self, level: int, message: str, *args, **kwargs) -> None:
if self.logger.isEnabledFor(level):
formatted_message = message % args
if is_onnx_diagnostics_log_artifact_enabled():
# Only log to terminal if artifact is enabled.
# See [NOTE: `dynamo_export` diagnostics logging] for details.
self.logger.log(level, formatted_message, **kwargs)
self.additional_messages.append(formatted_message)
@dataclasses.dataclass
class DiagnosticContext(infra.DiagnosticContext[Diagnostic]):
logger: logging.Logger = dataclasses.field(init=False, default=diagnostic_logger)
_bound_diagnostic_type: type[Diagnostic] = dataclasses.field(
init=False, default=Diagnostic
)
def __enter__(self):
self._previous_log_level = self.logger.level
# Adjust the logger level based on `options.verbosity_level` and the environment
# variable `TORCH_LOGS`. See [NOTE: `dynamo_export` diagnostics logging] for details.
if not is_onnx_diagnostics_log_artifact_enabled():
return super().__enter__()
else:
return self
diagnose_call = functools.partial(
decorator.diagnose_call,
diagnostic_type=Diagnostic,
format_argument=format_argument,
)
@dataclasses.dataclass
class UnsupportedFxNodeDiagnostic(Diagnostic):
unsupported_fx_node: torch.fx.Node | None = None
def __post_init__(self) -> None:
super().__post_init__()
        # NOTE: This is a hack to ensure that the additional fields are set and
        # not None. Ideally they should not be optional at all, but this is a known
        # limitation of `dataclasses`, resolvable in Python 3.10 with `kw_only=True`.
# https://stackoverflow.com/questions/69711886/python-dataclasses-inheritance-and-default-values
if self.unsupported_fx_node is None:
raise ValueError("unsupported_fx_node must be specified.")
```
|
=========================================================================================================================================
SOURCE CODE FILE: dynamo_graph_extractor.py
LINES: 1
SIZE: 8.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\dynamo_graph_extractor.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# NOTE: This file is referenced by name at
# /opt/pytorch/torch/_dynamo/eval_frame.py::DONT_WRAP_FILES.
# introduced by https://github.com/pytorch/pytorch/pull/98894.
# If this file is renamed, moved, etc please update the reference there!
from __future__ import annotations
import contextlib
import functools
import inspect
from typing import Any, Callable, TYPE_CHECKING
import torch._dynamo
import torch.export as torch_export
import torch.fx
import torch.onnx
from torch.onnx._internal import _exporter_legacy, io_adapter
from torch.utils import _pytree as pytree
if TYPE_CHECKING:
from collections.abc import Mapping, Sequence
class _PyTreeExtensionContext:
"""Context manager to register PyTree extension."""
_extensions: dict[type, tuple[pytree.FlattenFunc, pytree.UnflattenFunc]]
def __init__(self) -> None:
self._extensions = {}
# Register PyTree extension for HuggingFace model output.
self._register_huggingface_model_output_extension()
def __enter__(self):
for class_type, (flatten_func, unflatten_func) in self._extensions.items():
pytree._private_register_pytree_node(
class_type,
flatten_func,
unflatten_func,
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for class_type in self._extensions:
pytree.SUPPORTED_NODES.pop(class_type)
def register_pytree_node(
self,
class_type: type,
flatten_func: pytree.FlattenFunc,
unflatten_func: pytree.UnflattenFunc,
):
"""Register PyTree extension for a custom python type.
Args:
class_type: The custom python type.
flatten_func: The flatten function.
unflatten_func: The unflatten function.
Raises:
AssertionError: If the custom python type is already registered.
"""
if class_type in pytree.SUPPORTED_NODES or class_type in self._extensions:
# PyTree node already registered.
# E.g., `huggingface/transformer` registers `ModelOutput` as PyTree node after
# https://github.com/huggingface/transformers/pull/25358.
return
self._extensions[class_type] = (flatten_func, unflatten_func)
def _register_huggingface_model_output_extension(self):
try:
from transformers import modeling_outputs # type: ignore[import]
except ImportError:
return
def model_output_flatten(
output: modeling_outputs.ModelOutput,
) -> tuple[list[Any], pytree.Context]:
return list(output.values()), (type(output), list(output.keys()))
def model_output_unflatten(
values: list[Any], context: pytree.Context
) -> modeling_outputs.ModelOutput:
output_type, keys = context
return output_type(**dict(zip(keys, values)))
# All 'ModelOutput' subclasses are defined under module 'modeling_outputs'.
named_model_output_classes = inspect.getmembers(
modeling_outputs,
lambda x: (
inspect.isclass(x)
and issubclass(x, modeling_outputs.ModelOutput)
and x is not modeling_outputs.ModelOutput
),
)
for _, class_type in named_model_output_classes:
self.register_pytree_node(
class_type,
model_output_flatten,
                model_output_unflatten,  # type: ignore[arg-type]
)
class DynamoFlattenOutputStep(io_adapter.FlattenOutputStep):
"""Flatten nested collection and custom python types and return a flat list of elements.
Extended from :class:`io_adapter.FlattenOutputStep` to support flattening arbitrary
types via pytree extension. By default this supports many common user defined python
types such as :class:`ModelOutput` from HuggingFace transformers.
The pytree extension can be customized by passing in a ``_PyTreeExtensionContext``
object. See :meth:`_PyTreeExtensionContext.register_pytree_node`.
"""
def __init__(self, pytree_extension_context: _PyTreeExtensionContext | None = None):
super().__init__()
self._pytree_extension_context = (
pytree_extension_context or _PyTreeExtensionContext()
)
def apply(
self,
model_outputs: Any,
model: torch.nn.Module | Callable | torch_export.ExportedProgram | None = None,
) -> Sequence[Any]:
"""Flatten the model outputs, under the context of pytree extension."""
with self._pytree_extension_context:
return super().apply(model_outputs, model=model)
def _wrap_model_with_output_adapter(
model: torch.nn.Module | Callable,
output_adapter: DynamoFlattenOutputStep,
) -> Callable:
"""Wrap model with output adapter.
This is a helper function to enable :func:`dynamo.export` on models that produce
custom user defined types outputs. It wraps the model with an output adapter to
convert the outputs to :func:`dynamo.export` compatible types, i.e. :class:`torch.Tensor`.
The adapting logic is controlled by ``output_adapter``.
Args:
model: PyTorch model or function.
output_adapter: Output adapter to apply to model output.
Returns:
Wrapped model.
"""
model_func = model.forward if isinstance(model, torch.nn.Module) else model
# Preserve original function signature.
@functools.wraps(model_func)
def wrapped(*args, **kwargs):
return output_adapter.apply(model_func(*args, **kwargs), model=model)
return wrapped
class DynamoExport(_exporter_legacy.FXGraphExtractor):
"""Generates a FX GraphModule using torch.dynamo.export API
Args:
aten_graph: If True, exports a graph with ATen operators.
If False, exports a graph with Python operators.
"""
def __init__(
self,
aten_graph: bool | None = None,
):
super().__init__()
self.aten_graph = aten_graph or True
def generate_fx(
self,
options: _exporter_legacy.ResolvedExportOptions,
model: torch.nn.Module | Callable,
model_args: Sequence[Any],
model_kwargs: Mapping[str, Any],
) -> torch.fx.GraphModule:
# `dynamo.export` does not recognize custom user defined classes as output type.
# Apply wrapper to adapt the outputs back to `dynamo.export` compatible types,
# i.e. :class:`torch.Tensor`.
dynamo_flatten_output_step = DynamoFlattenOutputStep()
wrapped_model = _wrap_model_with_output_adapter(
model, dynamo_flatten_output_step
)
# Record the output adapter step.
self.output_adapter.append_step(dynamo_flatten_output_step)
# Translate callable to FX graph.
#
fake_mode = (
options.fake_context.fake_mode
if options.fake_context
else contextlib.nullcontext()
)
fx_mode = "symbolic" if options.dynamic_shapes else "fake"
with fake_mode: # type: ignore[attr-defined]
graph_module, graph_guard = torch._dynamo.export(
wrapped_model,
tracing_mode=fx_mode,
)(
*model_args,
**model_kwargs,
)
del graph_guard # Unused
torch._dynamo.reset()
# Export FX graph to ONNX ModelProto.
self.input_adapter.append_step(
io_adapter.FlattenInputWithTreeSpecValidationInputStep()
)
updated_model_args = self.input_adapter.apply(
*model_args, model=model, **model_kwargs
)
return self.pre_export_passes(options, model, graph_module, updated_model_args) # type: ignore[return-value]
def pre_export_passes(
self,
options: _exporter_legacy.ResolvedExportOptions,
original_model: torch.nn.Module | Callable,
fx_module: torch.fx.GraphModule,
fx_module_args: Sequence[Any],
):
return _exporter_legacy.common_pre_export_passes(
options, original_model, fx_module, fx_module_args
)
```
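A small sketch, assuming the internal module path above stays importable, of registering a hypothetical user-defined output container so that `DynamoFlattenOutputStep` can flatten it:
```py
from dataclasses import dataclass

import torch
from torch.onnx._internal.fx.dynamo_graph_extractor import (
    DynamoFlattenOutputStep,
    _PyTreeExtensionContext,
)

@dataclass
class MyOutput:  # hypothetical custom output container
    logits: torch.Tensor
    aux: torch.Tensor

ctx = _PyTreeExtensionContext()
ctx.register_pytree_node(
    MyOutput,
    lambda out: ([out.logits, out.aux], None),   # flatten
    lambda values, _context: MyOutput(*values),  # unflatten
)
step = DynamoFlattenOutputStep(pytree_extension_context=ctx)
flat = step.apply(MyOutput(torch.zeros(1), torch.ones(1)))
print(flat)  # [tensor([0.]), tensor([1.])]
```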
|
======================================================================================================================================
SOURCE CODE FILE: fx_onnx_interpreter.py
LINES: 8
SIZE: 34.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\fx_onnx_interpreter.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import inspect
import logging
import operator
import re
from typing import Callable, TYPE_CHECKING
import onnxscript
from onnxscript.function_libs.torch_lib import (
graph_building as onnxscript_graph_building,
)
import torch
import torch.fx
from torch.onnx import _type_utils as jit_type_utils
from torch.onnx._internal.fx import (
_pass,
diagnostics,
onnxfunction_dispatcher,
type_utils as fx_type_utils,
)
from torch.utils import _pytree
if TYPE_CHECKING:
from collections.abc import Sequence
def _fx_node_to_onnx_message_formatter(
fn: Callable,
self,
node: torch.fx.Node,
*args,
**kwargs,
) -> str:
return f"FX Node: {node.op}:{node.target}[name={node.name}]. "
def _fx_graph_to_onnx_message_formatter(
fn: Callable,
self,
fx_graph_module: torch.fx.GraphModule,
*args,
**kwargs,
) -> str:
return f"FX Graph: {fx_graph_module._get_name()}. "
def _location_from_fx_stack_trace(
node_stack_trace: str,
) -> diagnostics.infra.Location | None:
"""Extract location from FX node stack trace.
TODO(bowbao): Create fx utils module and move this function there.
Args:
node_stack_trace: The stack trace of the FX node. Example:
File "path/file.py", line 311, in <function>
<code>
| File "path/file2.py", line 389, in <function>
<code>
Returns:
location: The location of the FX node.
"""
if "File" not in node_stack_trace:
return None
lines = node_stack_trace.strip().split("\n")
idx = 0
while idx < len(lines) and "File" not in lines[idx]:
idx += 1
if idx + 1 >= len(lines):
return None
pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$")
matches = pattern.match(lines[idx].strip())
if matches:
uri = matches.group(1)
line_number = int(matches.group(2))
snippet = lines[idx + 1].strip()
return diagnostics.infra.Location(uri=uri, line=line_number, snippet=snippet)
return None
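# For example (illustrative), a stack trace of the form
#   File "path/file.py", line 311, in forward
#     x = torch.relu(x)
# yields Location(uri="path/file.py", line=311, snippet="x = torch.relu(x)").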
def _retrieve_or_adapt_input_to_graph_set(
fx_node_arg: fx_type_utils.Argument,
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,
):
"""Map FX value to TorchScript value.
    When creating a TorchScript graph from an FX graph, we need a mapping from FX variables
    to TorchScript variables. This function maps an FX variable, fx_node_arg, to a torch.jit.Value.
"""
from onnxscript import opset18 as op
onnx_tensor = fx_node_arg
if isinstance(onnx_tensor, torch.fx.Node):
# 1. fx_node_arg is a torch.fx.Node, which means
# fx_node_arg stands for the output of that torch.fx.Node.
        # 2. fx_node_arg (a variable in torch.fx.Graph) is mapped to the
        # torch.jit.Value, fx_name_to_onnxscript_value[fx_node_arg.name],
        # in the TorchScript graph.
return fx_name_to_onnxscript_value[onnx_tensor.name]
elif isinstance(onnx_tensor, (tuple, list)) and any(
isinstance(node, torch.fx.Node)
and fx_type_utils.is_torch_symbolic_type(node.meta.get("val"))
for node in onnx_tensor
):
        # This is intended to handle dynamic axes. For example, if the input size of
        # op.Expand is dynamic, each dimension is a variable (i.e., a sym variable in
        # the PyTorch FX graph; note that sym variables are mapped to tensors in the
        # ONNX Script world) computed by other operators.
sequence_mixed_elements: list[
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...]
| list[int]
] = []
        # onnx_tensor contains a list of scalars, each of which could be one of
        # - a tensor with empty shape,
        # - a tensor with shape (1,),
        # - a torch.SymInt,
        # - an int,
        # - ...
        # They should all be promoted to tensors with shape (1,)
# in order to call ONNX's Concat.
for tensor in onnx_tensor:
# Prepare `tensor` as input of ONNX's Concat.
if isinstance(
tensor, torch.fx.Node
) and fx_type_utils.is_torch_symbolic_type(tensor.meta.get("val")):
# In this case, tensor is a torch.SymInt from Dynamo's perspective.
# It might be mapped to tensor with shape () or (1,) in ONNX.
element_value = fx_name_to_onnxscript_value[tensor.name]
if isinstance(
element_value, onnxscript_graph_building.TorchScriptTensor
):
                    # All elements of sequence_mixed_elements will be sent to ONNX's
                    # Concat as inputs. Therefore, they are required to have the same
                    # rank. Since tensors with rank=0 (i.e., scalars) cannot be
                    # concatenated, all scalars are promoted to tensors with shape (1,).
with onnxscript.evaluator.default_as(tracer):
element_value = op.Reshape(
element_value, # type: ignore[arg-type, type-var]
[1], # type: ignore[arg-type, type-var]
)
sequence_mixed_elements.append(element_value)
elif isinstance(tensor, int):
                # NOTE: op.Concat doesn't support scalars, so we wrap the int in a
                # list to give it a dim; onnx-script will promote it to a tensor(int64).
sequence_mixed_elements.append([tensor])
else:
raise RuntimeError(
f"Unsupported type in sequence_mixed_elements: {type(tensor)}"
)
# Concat all the elements in the sequence.
# shapes are mapped to tensors in ONNX graph (TorchScriptGraph),
# so list of sym_ints is concatenated to a tensor before calling ONNX op.
# For example:
# inputs: [[2], [4], fx.Node(SymIntA), [1], fx.Node(SymIntB)]
# outputs: op.Concat([op.Constant(2), op.Constant(4), TorchScriptTensor(A), op.Constant(1), TorchScriptTensor(B)])
        # onnx-script automatically wraps python numbers with op.Constant,
        # so we don't need to process them specially.
with onnxscript.evaluator.default_as(tracer):
output = op.Concat(*sequence_mixed_elements, axis=0) # type: ignore[type-var]
output.dtype = torch.int64 # type: ignore[union-attr]
output.shape = [len(sequence_mixed_elements)] # type: ignore[union-attr]
return output
elif isinstance(onnx_tensor, (tuple, list)) and all(
isinstance(node, torch.fx.Node) or node is None for node in onnx_tensor
):
sequence_elements: list[
onnxscript_graph_building.TorchScriptTensor
| None
| tuple[onnxscript_graph_building.TorchScriptTensor, ...]
] = []
for tensor in onnx_tensor:
sequence_elements.append(
fx_name_to_onnxscript_value[tensor.name] if tensor is not None else None # type: ignore[index, union-attr]
)
return sequence_elements
if isinstance(onnx_tensor, torch.dtype):
onnx_tensor = int( # type: ignore[call-overload]
jit_type_utils.JitScalarType.from_dtype(onnx_tensor).onnx_type()
)
    # NOTE: if device is specified in kwargs (not consumed), it is safe to ignore it. But
    # if it's in args, we need to convert it to a string for the dispatcher to match the schema.
if isinstance(onnx_tensor, torch.device):
# torch.device is not supported by onnxscript (no op). We turn it into
# a string.
return str(onnx_tensor)
# all other cases, we do nothing.
return onnx_tensor
def filter_incompatible_and_dtype_convert_kwargs(kwargs):
"""Filter out kwargs that are not supported by onnxscript."""
filtered = {}
for key, value in kwargs.items():
if key in {
"layout",
"device",
"requires_grad",
"pin_memory",
"memory_format",
"implicit",
}:
continue
if key == "dtype":
if value is None:
# We omit if dtype is not provided, because onnxscript handles the
# default case.
continue
else:
value = int(jit_type_utils.JitScalarType.from_dtype(value).onnx_type()) # type: ignore[call-overload]
filtered[key] = value
return filtered
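# For example (illustrative; ONNX's FLOAT dtype has enum value 1):
#   filter_incompatible_and_dtype_convert_kwargs(
#       {"dtype": torch.float32, "device": "cpu", "pin_memory": False}
#   )
#   # -> {"dtype": 1}; "device" and "pin_memory" are dropped.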
def _fill_tensor_shape_type(
onnxscript_values: onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
name: str,
expected_values: fx_type_utils.META_VALUE_TYPE
| list[fx_type_utils.META_VALUE_TYPE]
| tuple[fx_type_utils.META_VALUE_TYPE | None, ...],
):
"""Fill the meta information of onnxscript_values with that from the fx FakeTensor."""
if isinstance(expected_values, (list, tuple)) and not isinstance(
onnxscript_values, (list, tuple)
):
# ex: aten::split - in onnx_dtype: seq(tensor)
# onnxscript_values is a single tensor, but expected_values is a list of tensors.
return
flat_onnxscript_values, _ = _pytree.tree_flatten(onnxscript_values)
flat_expected_values, _ = _pytree.tree_flatten(expected_values)
for i, (onnxscript_value, expected_value) in enumerate(
zip(flat_onnxscript_values, flat_expected_values)
):
if expected_value is None:
# There is no shape/type from None.
# NOTE: according to https://github.com/pytorch/pytorch/blob/main/torch/_meta_registrations.py,
# None could be a valid value for return type, so we need to handle it.
# e.g. the function: meta__scaled_dot_product_flash() in cpu mode.
continue
elif fx_type_utils.is_torch_symbolic_type(expected_value):
            # aten::sym_size output is an int, not a tensor, which stands
            # for the size of one dim. We treat it as a 1-D tensor.
onnxscript_value.dtype = fx_type_utils.from_sym_value_to_torch_dtype(
expected_value
)
onnxscript_value.shape = torch.Size([1])
elif isinstance(expected_value, (int, float, bool)):
onnxscript_value.dtype = fx_type_utils.from_scalar_type_to_torch_dtype(
type(expected_value)
)
onnxscript_value.shape = torch.Size([])
elif isinstance(expected_value, complex):
# From complex scalar to real representation
onnxscript_value_to_torch_dtype = (
fx_type_utils.from_scalar_type_to_torch_dtype(type(expected_value))
)
onnxscript_value.dtype = (
fx_type_utils.from_complex_to_float(onnxscript_value_to_torch_dtype)
if onnxscript_value_to_torch_dtype is not None
else None
)
onnxscript_value.shape = torch.Size([2])
elif fx_type_utils.is_torch_complex_dtype(expected_value.dtype):
# Like torch.view_as_real, we flatten complex tensors to real tensors with
# additional last dimension of 2
onnxscript_value.shape = torch.Size((*expected_value.size(), 2))
# complex64 -> float32, complex128 -> float64, etc.
onnxscript_value.dtype = fx_type_utils.from_complex_to_float(
expected_value.dtype
)
# Dispatcher needs to know the value is complex
onnxscript_value.is_complex = True
else:
# We set node output sizes to be dynamic to continue the model conversion,
# and inputs are also set to be dynamic in add_input().
onnxscript_value.shape = expected_value.size()
onnxscript_value.dtype = expected_value.dtype
# naming
if i > 0:
onnxscript_value.name = f"{name}_{i}"
else:
onnxscript_value.name = name
def _fill_in_default_kwargs(
node: torch.fx.Node,
) -> tuple[list[fx_type_utils.Argument], dict[str, fx_type_utils.Argument]]:
"""Find and Fill in the not provided kwargs with default values."""
# TODO: aten::sym_size has overload, but fx graph is using
# overloadpacket for some reasons.
# https://github.com/pytorch/pytorch/issues/97201
# We manually assigned overload for aten::sym_size.
if hasattr(node.target, "_schema"):
node_schema = node.target._schema # type: ignore[union-attr]
else:
node_schema = torch.ops.aten.sym_size.int._schema # type: ignore[union-attr]
# This function assumes the order of arguments in FX op is the
# same as the order of arguments in TorchScript op.
complete_args: list[fx_type_utils.Argument] = []
complete_kwargs: dict[str, fx_type_utils.Argument] = {}
if inspect.isbuiltin(node.target):
complete_args = list(node.args)
else:
for i, expected_arg in enumerate(node_schema.arguments):
if i < len(node.args):
complete_args.append(node.args[i])
elif expected_arg.name in node.kwargs:
complete_kwargs[expected_arg.name] = node.kwargs[expected_arg.name]
else:
# Get default from schema.
complete_kwargs[expected_arg.name] = expected_arg.default_value
return complete_args, complete_kwargs
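# For example (illustrative), given a node for aten::add.Tensor called as add(x, y),
# whose schema is "add(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor",
# this returns complete_args=[x, y] and complete_kwargs={"alpha": 1}.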
def _wrap_fx_args_as_onnxscript_args(
complete_args: list[fx_type_utils.Argument],
complete_kwargs: dict[str, fx_type_utils.Argument],
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,
) -> tuple[
Sequence[
onnxscript_graph_building.TorchScriptTensor
| str
| int
| float
| bool
| list
| complex
| None
],
dict[str, fx_type_utils.Argument],
]:
"""Map all FX arguments of a node to arguments in TorchScript graph."""
onnxscript_args = tuple(
_retrieve_or_adapt_input_to_graph_set(arg, fx_name_to_onnxscript_value, tracer)
for arg in complete_args
)
onnxscript_kwargs = filter_incompatible_and_dtype_convert_kwargs(complete_kwargs)
return onnxscript_args, onnxscript_kwargs
class FxOnnxInterpreter:
"""Stateless class to process FX graph Nodes and translate them into their ONNX counterparts.
All FX nodes described by [FX Graph](https://pytorch.org/docs/stable/fx.html#torch.fx.Graph) are supported.
    Similarly to the [FX Interpreter pattern](https://pytorch.org/docs/stable/fx.html#torch.fx.Interpreter), each FX node
    is implemented in its own method in this class.
    Each operator's implementation returns either an `onnxscript.OnnxFunction` or
    `onnxscript.TracedOnnxFunction` instance, selected by the dispatch algorithm. It may
    also raise a RuntimeError if no overloaded functions are available for the given FX node.
TODO: Convert methods to @staticmethod when the diagnostic system supports it
DO NOT ADD NEW ATTRIBUTES TO THIS CLASS!
"""
def __init__(
self,
diagnostic_context: diagnostics.DiagnosticContext,
):
        # THIS SHOULD BE THE ONLY STATE IN THIS CLASS (a constraint of the diagnostics API)
# TODO: Diagnostics API should be revised to get rid of this attribute.
# DO NOT add other class-level attributes.
self.diagnostic_context = diagnostic_context
@diagnostics.diagnose_call(
diagnostics.rules.fx_node_to_onnx,
diagnostic_message_formatter=_fx_node_to_onnx_message_formatter,
)
def run_node(
self,
node,
fx_graph_module: torch.fx.GraphModule,
onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,
onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,
onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
):
"""Execute a single FX node to produce its ONNX counterpart.
Args:
node: The FX node to be translated.
fx_graph_module: The FX graph module containing the node.
onnxfunction_dispatcher: The dispatcher to find the best matched ONNX op.
onnxscript_graph: The ONNX graph to be populated.
onnxscript_tracer: The tracer to trace the ONNX graph.
fx_name_to_onnxscript_value: The mapping from FX node name to ONNX Script value.
Raises:
RuntimeError: When a node.op is not supported.
"""
# Record stack trace of node in diagnostic.
node_stack_trace = node.stack_trace
if node_stack_trace:
diagnostic = self.diagnostic_context.inflight_diagnostic(
rule=diagnostics.rules.fx_node_to_onnx
)
with diagnostic.log_section(logging.INFO, "PyTorch source information"):
diagnostic.info("```\n%s\n```", node_stack_trace)
location = _location_from_fx_stack_trace(node_stack_trace)
if location is not None:
diagnostic.with_location(location)
if node.op == "placeholder":
self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value)
elif node.op == "get_attr":
self.get_attr(
node,
onnxscript_graph,
fx_name_to_onnxscript_value,
fx_graph_module,
)
elif node.op == "call_function":
self.call_function(
node,
onnxscript_tracer,
fx_name_to_onnxscript_value,
onnxfunction_dispatcher,
fx_graph_module,
)
elif node.op == "call_method":
self.call_method(node)
elif node.op == "call_module":
self.call_module(
node,
onnxscript_graph,
fx_name_to_onnxscript_value,
onnxscript_tracer,
fx_graph_module,
onnxfunction_dispatcher,
)
elif node.op == "output":
self.output(node, onnxscript_graph, fx_name_to_onnxscript_value)
else:
raise RuntimeError(f"Found node type not defined in torch.fx: {node.op}")
@diagnostics.diagnose_call(
diagnostics.rules.fx_graph_to_onnx,
diagnostic_message_formatter=_fx_graph_to_onnx_message_formatter,
)
def run(
self,
fx_graph_module: torch.fx.GraphModule,
onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,
parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph
| None = None,
) -> onnxscript_graph_building.TorchScriptGraph:
"""Analyze all FX nodes and trigger their ONNX translation.
Args:
fx_graph_module: FX graph module to be translated.
onnxfunction_dispatcher: ONNX function dispatcher.
parent_onnxscript_graph: The parent TorchScript graph. Must be provided if
`fx_graph_module` is a submodule. If not provided,
`fx_graph_module` is assumed to be the root module.
"""
diagnostic = self.diagnostic_context.inflight_diagnostic()
with diagnostic.log_section(logging.DEBUG, "FX Graph:"):
diagnostic.debug(
"```\n%s\n```",
diagnostics.LazyString(fx_graph_module.print_readable, False),
)
if parent_onnxscript_graph is not None:
# If parent_onnxscript_graph is provided, we assume fx_graph_module is a
# submodule representing a forward call of an nn.Module.
# Compose package and version where the nn.Module is defined as domain name
# for the local function.
onnx_meta: _pass.GraphModuleOnnxMeta | None = fx_graph_module.meta.get(
"onnx"
)
if onnx_meta is None:
                raise RuntimeError(
                    f"ONNX meta is not found in submodule {fx_graph_module._get_name()}. "
                    f"Only submodules produced by the `Modularize` pass are supported in ONNX export."
                )
onnx_domain = onnx_meta.package_info.to_onnx_domain_string()
else:
# Leave as default domain name for the root module.
onnx_domain = None
onnxscript_graph = onnxscript_graph_building.TorchScriptGraph(
parent_onnxscript_graph, domain_name=onnx_domain
)
onnxscript_tracer = onnxscript_graph_building.TorchScriptTracingEvaluator(
onnxscript_graph
)
# In the following loop, a TorchScript graph is created to
# represent the input FX graph with ONNX symbols (e.g., onnx::add).
# To connect the values to nodes in the TorchScript graph, we maintain
# fx_name_to_onnxscript_value. Basically, we want to translate
# fx_tensor_x (type: torch.fx.Node) -> fx_node_1 -> fx_tensor_y (type: torch.fx.Node)
# to
# fx_name_to_onnxscript_value[fx_tensor_x.name] -> onnx_node_1 -> fx_name_to_onnxscript_value[fx_tensor_y.name]
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
] = {}
        # TODO: Fix FakeTensorMode limitation asap
        # We want to pass lists of ints and floats to the TorchScript graph correctly
        # in _export_fx_to_ts, so we must disable FakeTensorMode. Otherwise, the graph may
        # receive FakeTensors, resulting in a runtime error. In addition, the TorchScript-based
        # ONNX exporter used in _ts_graph_to_onnx_model_in_protobuf is not compatible
        # with FakeTensorMode.
with torch.utils._mode_utils.no_dispatch():
for node in fx_graph_module.graph.nodes:
self.run_node(
node,
fx_graph_module,
onnxfunction_dispatcher,
onnxscript_graph,
onnxscript_tracer,
fx_name_to_onnxscript_value,
)
with diagnostic.log_section(logging.DEBUG, "ONNX Graph:"):
diagnostic.debug("```\n%s\n```", onnxscript_graph.torch_graph) # type: ignore[attr-defined]
return onnxscript_graph
def placeholder(
self,
node: torch.fx.Node,
onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
):
# Input of graph.
# The node.meta["val"] is generated by FakeTensorProp.
        # NOTE: add_input() is intended to create nodes with shape/type information.
fake_tensor = node.meta.get("val", None)
        # NOTE: During tracing, when inputs are constants, they are represented
        # by nodes with node.meta['val'] being None (nn.Module to dynamo_export)
        # or nodes with node.meta['val'] being a builtin value (ExportedProgram to dynamo_export).
        # Nonetheless, the nodes are not consumed by others, so we don't need to
        # create a TorchScriptTensor for them.
if fake_tensor is None or isinstance(fake_tensor, (int, float, bool, str)):
output = onnxscript_graph.add_input(
input_name=None,
)
elif isinstance(fake_tensor, torch.Tensor):
# NOTE: ONNX doesn't support tensor of complex64/complex128, so we
# convert them to float32/float64 with real representation.
if fx_type_utils.is_torch_complex_dtype(fake_tensor.dtype):
fake_tensor = torch.view_as_real(fake_tensor.resolve_conj())
output = onnxscript_graph.add_input(
input_name=node.name,
shape=fake_tensor.shape,
dtype=fake_tensor.dtype,
)
elif fx_type_utils.is_torch_symbolic_type(fake_tensor):
output = onnxscript_graph.add_input(
input_name=node.name,
shape=torch.Size([]),
dtype=fx_type_utils.from_sym_value_to_torch_dtype(fake_tensor),
)
else:
raise RuntimeError(
f"Unsupported type(node.meta['val']) for placeholder: {type(fake_tensor)}"
)
assert output is not None, (
f"Node creates None with target={node.target} and name={node.name}"
)
assert isinstance(output, onnxscript_graph_building.TorchScriptTensor)
assert isinstance(output, onnxscript.tensor.Tensor)
fx_name_to_onnxscript_value[node.name] = output
def call_function(
self,
node: torch.fx.Node,
onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,
fx_graph_module: torch.fx.GraphModule,
):
# aten ops and other stateless functions.
if node.target == operator.getitem and isinstance(
fx_name_to_onnxscript_value[node.args[0].name], # type: ignore[union-attr,index]
tuple,
):
onnx_tensor_tuple = fx_name_to_onnxscript_value[node.args[0].name] # type: ignore[union-attr,index]
index = node.args[1]
value = onnx_tensor_tuple[index] # type: ignore[index]
assert value is not None, (
f"Node creates None with target={node.target} and name={node.name}"
)
assert isinstance(
value, (onnxscript_graph_building.TorchScriptTensor, tuple)
), type(value)
fx_name_to_onnxscript_value[node.name] = value
return
        # Map FX inputs to ONNX inputs and fill optional inputs with default values.
fx_args, fx_kwargs = _fill_in_default_kwargs(node)
onnx_args, onnx_kwargs = _wrap_fx_args_as_onnxscript_args(
fx_args,
fx_kwargs,
fx_name_to_onnxscript_value,
onnxscript_tracer,
)
        # Dispatch to an ONNX op through OpSchema. The input argument dtypes are compared to
        # the function signatures in OpSchema to find the best-matching overload.
symbolic_fn = onnxfunction_dispatcher.dispatch(
node=node,
onnx_args=onnx_args, # type: ignore[arg-type]
onnx_kwargs=onnx_kwargs,
diagnostic_context=self.diagnostic_context,
)
with onnxscript.evaluator.default_as(onnxscript_tracer):
output: (
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...]
) = symbolic_fn(*onnx_args, **onnx_kwargs)
assert output is not None, (
f"Node creates None with target={node.target}, name={node.name}, args={onnx_args}, kwargs={onnx_kwargs}"
)
# Assign type and shape from fx graph.
_fill_tensor_shape_type(output, node.name, node.meta["val"])
        # One fx node could produce multiple outputs (e.g., a tuple of tensors); in
        # that case, `output` is a tuple of TorchScriptTensors.
assert isinstance(
output, (onnxscript_graph_building.TorchScriptTensor, tuple)
), type(output)
fx_name_to_onnxscript_value[node.name] = output
def output(
self,
node: torch.fx.Node,
onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
):
if isinstance(node.args[0], torch.fx.Node):
onnx_tensor_or_tensor_tuple = fx_name_to_onnxscript_value[node.args[0].name]
onnxscript_graph.register_outputs(onnx_tensor_or_tensor_tuple)
else:
# ONNX can't represent collection types (e.g., dictionary, tuple of tuple of
# tensor, etc), we flatten the collection and register each element as output.
flat_args, _ = _pytree.tree_flatten(node.args[0])
for arg in flat_args:
assert isinstance(arg, torch.fx.Node), (
f"arg must be a torch.fx.Node, not {type(arg)}"
)
onnx_tensor_or_tensor_tuple = fx_name_to_onnxscript_value[arg.name]
onnxscript_graph.register_outputs(onnx_tensor_or_tensor_tuple)
def call_method(self, node: torch.fx.Node):
# TODO(wechi): Support call_method.
raise RuntimeError("call_method is not supported yet.")
def call_module(
self,
node: torch.fx.Node,
parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,
root_fx_graph_module: torch.fx.GraphModule,
onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,
) -> None:
"""Export a fx.GraphModule submodule to ONNXScript graph.
The export process specifically targets `call_module` nodes that are created by
        the exporter's `Modularize` pass. Each `call_module` node has an associated fx.GraphModule,
        identified by `node.target`, underneath the root fx.GraphModule. These `call_module` nodes are exported as ONNX
function nodes. The related `sub_module` is then exported as an ONNX model local function,
which is represented by another `TorchScriptGraph`. This `TorchScriptGraph` sets the current
`onnxscript_graph` as its parent.
Args:
node: The call_module node in the FX graph that represents the submodule call.
parent_onnxscript_graph: The parent ONNXScript graph to which the ONNX function and
function node belong.
fx_name_to_onnxscript_value: The mapping from FX node name to ONNXScript value.
tracer: The tracer used to trace the ONNXScript graph.
root_fx_graph_module: The root FX module.
onnxfunction_dispatcher: The dispatcher.
"""
assert isinstance(node.target, str), (
f"node.target must be a str, not {type(node.target)} for node {node}."
)
sub_module = root_fx_graph_module.get_submodule(node.target)
assert isinstance(sub_module, torch.fx.GraphModule), (
f"sub_module must be a torch.fx.GraphModule, not {type(sub_module)} for node {node}."
)
sub_onnxscript_graph = self.run(
sub_module, onnxfunction_dispatcher, parent_onnxscript_graph
)
onnx_args, _ = _wrap_fx_args_as_onnxscript_args(
list(node.args), {}, fx_name_to_onnxscript_value, tracer
)
        # TODO: We may want to consider other naming styles. The goal is to be stable and
        # unique, such that the function can be easily identified in case of kernel substitution.
        # An example of the current style is the combination of the qualified module class name
        # and the module attribute name: `torch_nn_modules_conv_Conv2d_conv1`.
        # Other naming styles, such as the qualified module class name made unique, could also
        # be considered.
unique_module_name = f"{sub_module._get_name()}_{node.target}"
outputs: (
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...]
) = parent_onnxscript_graph.add_module_call( # type: ignore[assignment]
unique_module_name, sub_onnxscript_graph, onnx_args
)
assert isinstance(
outputs, (onnxscript_graph_building.TorchScriptTensor, tuple)
), f"Unexpected outputs type {type(outputs)} for node {node}."
_fill_tensor_shape_type(outputs, node.name, node.meta["val"])
fx_name_to_onnxscript_value[node.name] = outputs
# Skip op_level_validation for call_module. Subgraph nodes are validated individually.
def get_attr(
self,
node: torch.fx.Node,
onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,
fx_name_to_onnxscript_value: dict[
str,
onnxscript_graph_building.TorchScriptTensor
| tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
fx_graph_module: torch.fx.GraphModule,
):
assert isinstance(node.target, str), f"node.target {node.target} is not a str."
attr_tensor = getattr(fx_graph_module, node.target)
assert isinstance(attr_tensor, torch.Tensor), f"{attr_tensor} is not a tensor."
        # Parameter/buffer names cannot contain ".".
        # Revert "/" back to "." to restore the namespace formatting.
input_ = onnxscript_graph.add_initializer(
name=node.target.replace("/", "."),
value=attr_tensor,
)
assert isinstance(input_, onnxscript_graph_building.TorchScriptTensor)
assert isinstance(input_, onnxscript.tensor.Tensor)
fx_name_to_onnxscript_value[node.name] = input_
```
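In outline, and assuming the internal module paths above stay importable, the exporter drives this interpreter roughly as follows; the FX module and dispatcher are produced by earlier export stages.
```py
import torch
from torch.onnx._internal.fx import diagnostics, fx_onnx_interpreter

# The exporter owns one diagnostic context for the whole export session.
diagnostic_context = diagnostics.DiagnosticContext(
    "torch.onnx.dynamo_export", torch.__version__
)
interpreter = fx_onnx_interpreter.FxOnnxInterpreter(
    diagnostic_context=diagnostic_context
)
# Earlier stages produce an ATen-level `fx_graph_module` (with FakeTensor
# metadata) and an `OnnxFunctionDispatcher`; the translation is then one call:
#
#   onnxscript_graph = interpreter.run(fx_graph_module, onnxfunction_dispatcher)
```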
|
==============================================================================================================================================
SOURCE CODE FILE: fx_symbolic_graph_extractor.py
LINES: 1
SIZE: 10.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\fx_symbolic_graph_extractor.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import functools
from typing import Any, Callable, TYPE_CHECKING
import torch
import torch.fx
import torch.onnx
import torch.onnx._internal.fx.passes as passes
from torch.onnx._internal import _exporter_legacy, io_adapter
if TYPE_CHECKING:
from collections.abc import Mapping, Sequence
# Functions directly wrapped to produce torch.fx.Proxy so that symbolic
# data can flow through those functions. Python functions (e.g., `torch.arange`)
# not defined by pybind11 in C++ do not go though Python dispatcher, so
# they are not automatically patched by FX's Python dispatcher.
# The list below means `torch.arange`, `torch.tensor`, and so on will be
# patched.
_TORCH_METHODS_TO_PATCH: tuple[str, ...] = (
"arange",
"tensor",
"finfo",
"full",
"empty",
)
class ModuleExpansionTracer(torch.fx._symbolic_trace.Tracer):
"""Tracer to create ONNX-exporting friendly FX graph.
This tracer traces models into operators. That is,
the traced graph mostly contains call_function nodes and
has no call_module nodes. The call_module nodes
    are problematic for the use of make_fx(...) in the ONNX
    exporter.
"""
def is_leaf_module(
self, module: torch.nn.Module, module_qualified_name: str
) -> bool:
# This returns False so that all sub-modules are considered as not leaves
# and therefore expanded into operators in
# torch.fx._symbolic_trace.Tracer.call_module.
return False
def to_bool(self, obj: torch.fx.Proxy) -> bool:
        # FIXME: This is a hack to trace through if-else Python blocks.
        # It may generate incorrect ONNX graphs if the branch outcome actually
        # depends on the traced values, since this always returns False.
return False
def _wrap_for_symbolic_trace(target: Callable) -> tuple[Callable, Callable]:
"""This function wraps ```target`` for symbolic tracing.
This function wraps ```target``` so that its wrapper produces
torch.fx.Proxy in symbolic computation. The returned values are
the wrapper and then the original function. Per `_TORCH_METHODS_TO_PATCH`,
this function shall receive `torch.arange`, `torch.tensor`, etc. as inputs.
"""
@functools.wraps(target)
def wrapper(*args, **kwargs):
proxy = None
def check_has_proxy(v):
if isinstance(v, torch.fx.Proxy):
nonlocal proxy
proxy = v
torch.fx.node.map_aggregate(args, check_has_proxy)
torch.fx.node.map_aggregate(kwargs, check_has_proxy)
if proxy is not None:
return proxy.tracer.create_proxy("call_function", target, args, kwargs)
else:
return target(*args, **kwargs)
return wrapper, target
def _module_expansion_symbolic_trace(
root: torch.nn.Module | Callable[..., Any],
concrete_args: dict[str, Any] | None = None,
) -> torch.fx.GraphModule:
"""Trace a callable into FX graph.
When "root" is torch.nn.Module, calls to its submodule (type: torch.nn.Module) will be
expanded into operators (e.g., torch.matmul, torch.add, +, and -) to simplify graph
structure.
"""
    # For functions that don't support symbolic tracing, create wrappers
    # that produce symbolic results during tracing.
patched_torch_methods = {
target_name: _wrap_for_symbolic_trace(getattr(torch, target_name))
for target_name in _TORCH_METHODS_TO_PATCH
}
# Set the symbolic-tracing friendly functions so that `tracer.trace` below
# can work.
for name, (wrapper, _) in patched_torch_methods.items():
setattr(torch, name, wrapper)
try:
# Set up a tracer.
tracer = ModuleExpansionTracer()
# Trace the model.
graph = tracer.trace(root, concrete_args)
name = (
root.__class__.__name__
if isinstance(root, torch.nn.Module)
else root.__name__
)
return torch.fx.GraphModule(tracer.root, graph, name)
finally:
# Revert the patches for symbolic tracing.
for name, (_, wrapped) in patched_torch_methods.items():
# wrapped is the original version of `torch.name`.
setattr(torch, name, wrapped)
# TODO: Migrate to `DynamoExporter` after fake model tracing is supported.
# Proposal at https://github.com/pytorch/pytorch/issues/95900.
class FXSymbolicTracer(_exporter_legacy.FXGraphExtractor):
"""Generates a FX GraphModule using torch.fx.symbolic_trace API
Args:
concrete_args: Inputs to be partially specialized
It can be used to remove control flow or data structures.
For example::
def f(a, b):
if b == True:
return a
else:
return a*2
FX can typically not trace through this due to the presence of control
flow. However, we can use `concrete_args` to specialize on the value of
`b` to trace through this::
f = fx.symbolic_trace(f, concrete_args={'b': False})
assert f(3, False) == 6
Note that although you can still pass in different values of `b`, they will be ignored.
It can also be used to eliminate data-structure handling from
our function. This will use pytrees to flatten your input. To avoid
overspecializing, pass in `fx.PH` for values that shouldn't be
specialized. For example::
def f(x):
out = 0
for v in x.values():
out += v
return out
f = fx.symbolic_trace(
f,
concrete_args={"x": {"a": fx.PH, "b": fx.PH, "c": fx.PH}},
)
assert f({"a": 1, "b": 2, "c": 4}) == 7
"""
def __init__(self, concrete_args: dict[str, Any] | None = None):
super().__init__()
# TODO: plumb ``concrete_args`` to symbolic_trace call at ``generate_fx``
self.concrete_args = concrete_args
def _trace_into_fx_graph_via_fx_symbolic_trace(
self, model, model_args, model_kwargs
) -> torch.fx.GraphModule:
# Bind model args and kwargs with model signature to retrieve default values
# of unprovided arguments. These are then used to construct ``concrete_args``.
bind_input_step = io_adapter.BindInputStep(
torch.onnx.utils.model_signature(model)
)
self.input_adapter.append_step(bind_input_step)
_, named_args = bind_input_step.apply(model_args, model_kwargs, model=model)
# Create inputs to call symbolic trace (torch.fx.symbolic_trace)
# Example content of concrete_args:
# concrete_args["x"] = torch.fx._symbolic_trace.PH
# concrete_args["b"] = 1
# where "x" and "b" are argument names in "signature".
concrete_args = {}
for param_name, param_value in named_args.items():
if isinstance(param_value, torch.Tensor):
# param_value can be, e.g., a real tensor or a fake tensor.
# param_value is treated as substitutable tensor symbol (aka placeholder).
concrete_args[param_name] = torch.fx._symbolic_trace.PH
else:
concrete_args[param_name] = param_value
# Merge kwargs back into args since that is the format FX graph expects.
merge_kwargs_step = io_adapter.MergeKwargsIntoArgsInputStep()
self.input_adapter.append_step(merge_kwargs_step)
return _module_expansion_symbolic_trace(model, concrete_args=concrete_args)
def generate_fx(
self,
options: _exporter_legacy.ResolvedExportOptions,
model: torch.nn.Module | Callable,
model_args: Sequence[Any],
model_kwargs: Mapping[str, Any],
) -> torch.fx.GraphModule:
diagnostic_context = options.diagnostic_context
graph_module = self._trace_into_fx_graph_via_fx_symbolic_trace(
model, model_args, model_kwargs
)
# Make sure all placeholder nodes are executed before get_attr nodes.
        # Otherwise, inputs can interleave with initializers in the final ModelProto.graph.input.
        # Basically, we want
        # ModelProto.graph.input =
        # [input_0, input_1, ..., input_n, weight_0, weight_1, ..., weight_m]
        # and we don't want
        # ModelProto.graph.input =
        # [input_0, weight_0, input_1, weight_1, ..., input_n, weight_m]
graph_module = passes.MovePlaceholderToFront(
diagnostic_context, graph_module
).run()
        # To save memory, move get_attr to input so that the generated model doesn't
        # have weight tensors. "replaced_attrs" is a tuple of the replaced weight tensors.
replace_get_attr_with_placeholder_pass = passes.ReplaceGetAttrWithPlaceholder(
diagnostic_context, graph_module
)
graph_module = replace_get_attr_with_placeholder_pass.run()
replaced_attrs = replace_get_attr_with_placeholder_pass.replaced_attrs
append_extra_input_step = io_adapter.LiftParametersAndBuffersIntoArgsInputStep(
replaced_attrs
)
self.input_adapter.append_step(append_extra_input_step)
# Move all newly created placeholder nodes to the front of the graph.
graph_module = passes.MovePlaceholderToFront(
diagnostic_context, graph_module
).run()
# Finalize the graph editing.
graph_module.recompile()
updated_model_args = self.input_adapter.apply(
*model_args, model=model, **model_kwargs
)
return self.pre_export_passes(options, model, graph_module, updated_model_args) # type: ignore[return-value]
def pre_export_passes(
self,
options: _exporter_legacy.ResolvedExportOptions,
original_model: torch.nn.Module | Callable,
fx_module: torch.fx.GraphModule,
fx_module_args: Sequence[Any],
):
return _exporter_legacy.common_pre_export_passes(
options, original_model, fx_module, fx_module_args
)
```
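A minimal, self-contained sketch of the `concrete_args`/`fx.PH` mechanism the tracer above relies on, mirroring the example in the class docstring (the function and values here are illustrative):
```py
from torch import fx


def f(x):
    # `x` is a dict; FX cannot trace through dict structure directly, so each
    # entry is declared as a substitutable placeholder via fx.PH.
    return x["a"] + x["b"] + x["c"]


traced = fx.symbolic_trace(
    f, concrete_args={"x": {"a": fx.PH, "b": fx.PH, "c": fx.PH}}
)
assert traced({"a": 1, "b": 2, "c": 4}) == 7
```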
|
==========================================================================================================================================
SOURCE CODE FILE: onnxfunction_dispatcher.py
LINES: 10
SIZE: 37.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\onnxfunction_dispatcher.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Dispatcher for AtenLib functions from onnx-script."""
from __future__ import annotations
import logging
import operator
import types
from typing import Any, Callable, TYPE_CHECKING
import torch
import torch._ops
import torch.fx
from torch.onnx._internal.fx import (
diagnostics,
registration,
type_utils as fx_type_utils,
)
if TYPE_CHECKING:
from collections.abc import Sequence
import onnxscript # type: ignore[import]
from onnxscript.function_libs.torch_lib import ( # type: ignore[import]
graph_building as onnxscript_graph_building,
)
from torch.onnx import OnnxRegistry
def _find_opschema_matched_symbolic_function_disagnostic_message_formatter(
fn: Callable,
self,
node: torch.fx.Node,
default_and_custom_functions: list[registration.ONNXFunction],
*args,
**kwargs,
) -> str:
"""Format the diagnostic message for the nearest match warning."""
all_function_overload_names = ""
for symbolic_func in default_and_custom_functions:
overload_func = symbolic_func.onnx_function
all_function_overload_names += f"ONNX Node: {overload_func.name}[opset={overload_func.opset};is_custom={symbolic_func.is_custom}]. \n" # noqa: B950
return f"FX Node: {node.target}. \n{all_function_overload_names}"
def _find_operator_overloads_in_onnx_registry_disagnostic_message_formatter(
fn: Callable,
self,
node: torch.fx.Node,
*args,
**kwargs,
) -> str:
"""Format the diagnostic message for the nearest match warning."""
return f"Searching operator overload: '{node.target}' in onnx registry...\n"
class OnnxFunctionDispatcher:
"""A dispatcher that finds the best ONNX Function for ATen/Custom operators.
    It uses the `torch.ops` name to find the function. If the exact overload is not
    found, it falls back to the default overload. The best match is then selected
    among all function overloads, with an exact match taking precedence over the
    nearest ones.
Below is a breakdown on how the dispatch mechanism works:
1. Use the torch.ops name to find the function:
a. Check if the ATen overload exists in the registry.
b. If not, check if the default overload exists in the registry.
2. Find the nearest match among all overloaded functions:
a. If the types match perfectly, select the function.
b. Otherwise, find the nearest one with the highest matching score. Because of
the potential wrongly annotated dtypes and attributes matching, we use
nearest match to find the best function once the aten name is targeted.
3. Tie-breaker: If there are multiple nearest matches, we will select the one with
the highest matching score.
NOTE: The nearest match `doesn't guarantee` a correct match, and a warning message is logged.
"""
def __init__(
self,
onnx_registry: OnnxRegistry,
diagnostic_context: diagnostics.DiagnosticContext,
):
"""Initialize the ONNX Function dispatcher.
Args:
onnx_registry: The ONNX registry.
diagnostic_context: The diagnostic context to use for reporting errors.
"""
self.onnx_registry = onnx_registry
self.diagnostic_context = diagnostic_context
def dispatch(
self,
node: torch.fx.Node,
onnx_args: Sequence[
fx_type_utils.TensorLike | str | int | float | bool | list | complex | None
],
onnx_kwargs: dict[str, fx_type_utils.Argument],
diagnostic_context: diagnostics.DiagnosticContext,
) -> onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction:
"""Dispatches an ONNX function based on the given FX node, arguments, and keyword arguments.
Args:
node: The TorchFX node to dispatch the function for.
onnx_args: The arguments of the ONNX function.
onnx_kwargs: The keyword arguments of the ONNX function.
diagnostic_context: The diagnostic context to use for reporting errors.
Returns:
Either an `onnxscript.OnnxFunction` or `onnxscript.TracedOnnxFunction` instance based on the dispatch algorithm.
Raises:
RuntimeError: If there are no overloaded functions available for the given FX node.
"""
# If there are no overloaded functions available for the given FX node, raise an
# unsupported error
default_and_custom_functions = self.get_function_overloads(
node, diagnostic_context
)
        # If there are overloaded functions available, we will find the one that perfectly
        # or most nearly matches the given arguments and keyword arguments
return self._find_the_perfect_or_nearest_match_onnxfunction(
node,
default_and_custom_functions,
onnx_args,
onnx_kwargs,
diagnostic_context,
)
def _filter_or_keep_complex(
self,
node,
default_and_custom_functions: list[registration.ONNXFunction],
diagnostic_context: diagnostics.DiagnosticContext,
) -> list[registration.ONNXFunction]:
"""Filter the complex functions if the input has complex dtype."""
args_with_complex_dtype = [_is_arg_with_complex_dtype(arg) for arg in node.args]
if any(args_with_complex_dtype):
default_and_custom_functions = [
func for func in default_and_custom_functions if func.is_complex
]
# If we can't find the complex function group, raise error.
if not default_and_custom_functions:
op_full_name = self._get_aten_name(
node, diagnostic_context
).qualified_name()
diagnostic = diagnostics.UnsupportedFxNodeDiagnostic(
diagnostics.rules.no_symbolic_function_for_call_function,
diagnostics.levels.ERROR,
f"Cannot find any COMPLEX symbolic function for {op_full_name}, "
f"which should be registered under {node.target}.",
unsupported_fx_node=node,
)
diagnostic_context.log(diagnostic)
raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic)
else:
default_and_custom_functions = [
func for func in default_and_custom_functions if not func.is_complex
]
# If we can't find the complex function group, raise error.
if not default_and_custom_functions:
op_full_name = self._get_aten_name(
node, diagnostic_context
).qualified_name()
diagnostic = diagnostics.UnsupportedFxNodeDiagnostic(
diagnostics.rules.no_symbolic_function_for_call_function,
diagnostics.levels.ERROR,
f"Can ONLY find COMPLEX symbolic function for {op_full_name}, "
f"which should be registered under {node.target}.",
unsupported_fx_node=node,
)
diagnostic_context.log(diagnostic)
raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic)
return default_and_custom_functions
@diagnostics.diagnose_call(
diagnostics.rules.find_opschema_matched_symbolic_function,
diagnostic_message_formatter=_find_opschema_matched_symbolic_function_disagnostic_message_formatter,
)
def _find_the_perfect_or_nearest_match_onnxfunction(
self,
node: torch.fx.Node, # this is used in diagnostic_message_formatter
default_and_custom_functions: list[registration.ONNXFunction],
onnx_args: Sequence[
fx_type_utils.TensorLike | str | int | float | bool | list | complex | None
],
onnx_kwargs: dict[str, fx_type_utils.Argument],
diagnostic_context: diagnostics.DiagnosticContext,
):
"""Find the perfect/nearest matched OnnxFunction for the given FX node, arguments, and keyword arguments.
Args:
default_and_custom_functions: The list includes overloaded functions, with
custom ones appearing after the default ones.
onnx_args: Arguments organized in PyTorch inputs way.
onnx_kwargs: Keyword arguments organized in PyTorch inputs way.
diagnostic_context: The diagnostic context to use for reporting errors.
Returns:
Either an `onnxscript.OnnxFunction` or `onnxscript.TracedOnnxFunction` instance based on the dispatch algorithm.
Raises:
RuntimeError: If there are no overloaded functions available for the given FX node.
"""
overload_match_ranking: dict[registration.ONNXFunction, int | None] = {}
diagnostic = diagnostic_context.inflight_diagnostic()
# Iterate the overloaded functions in reverse order to prioritize the custom ones
# over the default ones, and find the perfect match.
for symbolic_function in reversed(default_and_custom_functions):
function_opschema = _OnnxSchemaChecker(symbolic_function.onnx_function)
# NOTE: 1. If the perfect match is found, return the function
if function_opschema.perfect_match_inputs(
diagnostic, onnx_args, onnx_kwargs
):
return symbolic_function.onnx_function
# Record the match score for the nearest match if it's not the perfect match
overload_match_ranking[symbolic_function] = function_opschema.match_score
        # NOTE: 2. If there is no perfect match, find the nearest match among the candidates.
# If there is no nearest match, raise an error
overload_match_ranking = {
k: v for k, v in overload_match_ranking.items() if v is not None
}
if not overload_match_ranking:
# If there are no overloaded functions available for the given FX node, raise an
# unsupported error
op_full_name = self._get_aten_name(
node, diagnostic_context
).qualified_name()
diagnostic = diagnostics.UnsupportedFxNodeDiagnostic(
diagnostics.rules.no_symbolic_function_for_call_function,
diagnostics.levels.ERROR,
f"Cannot find any perfect/nearest match of symbolic function for {op_full_name},"
f"which should be registered under {node.target}.",
unsupported_fx_node=node,
)
diagnostic_context.log(diagnostic)
raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic)
diagnostic.warning(
"### Exact match is not found!\n"
"Cannot find a perfect match of symbolic overload, "
"a nearest match is found. Please check the ONNX output carefully. \n",
)
diagnostic.level = diagnostics.levels.WARNING
        # NOTE: 3. Tie breaker: if there are multiple nearest matches, we prefer the
        # custom ones. If there are multiple custom ones, we choose the one that was
        # added to the list last.
symbolic_function_list: list[registration.ONNXFunction] = sorted(
overload_match_ranking,
key=lambda k: (
overload_match_ranking[k],
k.is_custom,
default_and_custom_functions.index(k),
),
reverse=True,
)
return symbolic_function_list[0].onnx_function
def _get_aten_name(
self, node: torch.fx.Node, diagnostic_context: diagnostics.DiagnosticContext
) -> registration.OpName:
"""Get the OpName from the target.
Args:
node: The TorchFX node to get the aten name for.
diagnostic_context: The diagnostic context to use for reporting errors.
Returns:
The internal op name within dataclass: registration.OpName.
"""
if node.target == operator.getitem:
return registration.OpName.from_name_parts(
namespace="aten", op_name="getitem"
)
if isinstance(node.target, torch._ops.OpOverloadPacket):
# aten::sym_size is the only OverloadPacket that we support.
# schema: aten::sym_size(Tensor self, int dim) -> Tensor
if node.target != torch.ops.aten.sym_size:
diagnostic = diagnostics.UnsupportedFxNodeDiagnostic(
diagnostics.rules.no_symbolic_function_for_call_function,
diagnostics.levels.ERROR,
f"Unsupported OverloadPacket: {node.target}, aten.sym_size is the only allowed OverloadPacket!",
unsupported_fx_node=node,
)
diagnostic_context.log(diagnostic)
raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic)
            # TODO(titaiwang): aten::sym_size has overloads, but the fx graph is using
            # the overloadpacket for some reason.
# https://github.com/pytorch/pytorch/issues/97201
aten_op_default = node.target.default
return registration.OpName.from_op_overload(op_overload=aten_op_default) # type: ignore[no-any-return]
if isinstance(node.target, types.BuiltinFunctionType):
            # Make sure it's a builtin op that consumes only symint/symfloat arguments.
for node_arg in node.args:
if (not isinstance(node_arg, (torch.fx.Node, int, float))) or (
isinstance(node_arg, torch.fx.Node)
and not fx_type_utils.is_torch_symbolic_type(node_arg.meta["val"])
):
diagnostic = diagnostics.UnsupportedFxNodeDiagnostic(
diagnostics.rules.no_symbolic_function_for_call_function,
diagnostics.levels.ERROR,
f"Unsupported node arg: {node_arg} (type {type(node_arg)}) with builtin function: {node.target},"
" only int/float/SymInt/SymFloat is supported with built-in ops!",
unsupported_fx_node=node,
)
diagnostic_context.log(diagnostic)
raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic)
return registration.OpName.from_builtin_function(node.target)
if isinstance(node.target, torch._ops.OpOverload):
return registration.OpName.from_op_overload(op_overload=node.target)
# Unexpected target, raise error.
diagnostic = diagnostics.UnsupportedFxNodeDiagnostic(
diagnostics.rules.no_symbolic_function_for_call_function,
diagnostics.levels.ERROR,
f"Unknown call_function target: {node.target}",
unsupported_fx_node=node,
)
diagnostic_context.log(diagnostic)
raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic)
@diagnostics.diagnose_call(
diagnostics.rules.find_operator_overloads_in_onnx_registry,
diagnostic_message_formatter=_find_operator_overloads_in_onnx_registry_disagnostic_message_formatter,
)
def get_function_overloads(
self,
node: torch.fx.Node,
diagnostic_context: diagnostics.DiagnosticContext,
) -> list[registration.ONNXFunction]:
"""Get the function overloads from the registry.
Args:
node: The node to get the function overloads for.
diagnostic_context: The diagnostic context to use for reporting errors.
Returns:
The list contains ONNXFunctions, starting with the default ones and
followed by any custom ones.
"""
internal_opname: registration.OpName = self._get_aten_name(
node=node, diagnostic_context=diagnostic_context
)
        # If the ATen/Custom operators are not registered, the group will be None,
        # and non-registered ATen/Custom operators will trigger an error in the next step.
function_group: list[registration.ONNXFunction] | None = None
function_group = self.onnx_registry.get_op_functions(
namespace=internal_opname.namespace,
op_name=internal_opname.op_name,
overload=internal_opname.overload,
)
# NOTE: Fall back to default overload if the ONNX registry doesn't have the overload.
if function_group is None:
function_group = self.onnx_registry.get_op_functions(
namespace=internal_opname.namespace,
op_name=internal_opname.op_name,
overload=None,
)
if function_group is not None:
op_full_name = internal_opname.qualified_name()
diagnostic = diagnostic_context.inflight_diagnostic()
diagnostic.warning(
"### The operator overload is not found in onnx registry!\n"
"Cannot find the operator overload in onnx registry, but "
"the default overload is found. Please check the ONNX output carefully. \n",
)
diagnostic.level = diagnostics.levels.WARNING
if function_group is not None:
# NOTE: If the input has complex dtype, we will only dispatch to the complex functions.
function_group = self._filter_or_keep_complex(
node, function_group, diagnostic_context
)
return function_group # type: ignore[return-value]
op_full_name = internal_opname.qualified_name()
diagnostic = diagnostics.UnsupportedFxNodeDiagnostic(
diagnostics.rules.no_symbolic_function_for_call_function,
diagnostics.levels.ERROR,
f"Cannot find symbolic function for {op_full_name}, "
f"which should be registered under {node.target}.",
unsupported_fx_node=node,
)
diagnostic_context.log(diagnostic)
raise diagnostics.RuntimeErrorWithDiagnostic(diagnostic)
class _OnnxSchemaChecker:
"""
The OnnxSchemaChecker class is a checker for ONNX OpSchema and param schema.
It provides methods to check for input compatibility based on the OpSchema. It also
provides a matching score to indicate how well the OpSchema matches the input and
kwargs types. A function will be evaluated as perfect match, nearest match eligible,
or no match.
Here are some common examples in categories:
1. [NOTE: Perfect match]: The number of inputs and attributes are exactly the same as
the OpSchema. The types of inputs and attributes are exactly the same as the
OpSchema.
```python
inputs = (Tensor[2, 3], Tensor[2, 3])
attributes = {"alpha": 1.0}
@torch_op("aten::op")
def aten_op(self: TReal, other: TReal, alpha: float = 1) -> TReal: ...
```
Result: Perfect match.
    2. [NOTE: Optional input]: The dispatcher recognizes optional inputs. However,
        an optional input cannot simply be omitted; an explicit None must be provided.
```python
inputs = (Tensor([2, 3]), None)
attributes = {}
aten_op(X: TTensor, Y: Optional[INT64]):
...
```
Result: Perfect match.
Real example: `aten::convolution`.
    3. [NOTE: Different attributes]: If an attribute is provided with a value, it
        must match an attribute in the function signature.
```python
inputs = (Tensor([2, 3]),)
attributes = {"a":1, "b":2}
aten_op(X: TTensor, a: int):
...
```
Result: No match.
Real example: `aten::div` vs `aten::div.Tensor_mode`.
    4. [NOTE: Default attributes]: A default attribute fills its value into the
        inputs/attributes.
```python
inputs = (Tensor([2, 3]),)
attributes = {}
aten_op(X: TTensor, a: int = 3):
...
```
Result: Perfect match.
Real example: `aten::clone`
5. [NOTE: Ignore attribute with None value]: The attributes with None value
will be ignored in matching.
```python
inputs = (Tensor([2, 3]),)
attributes = {"a": None}
aten_op(X: TTensor):
...
```
Result: Perfect match.
```python
inputs = (Tensor([2, 3]),)
attributes = {"a": None}
aten_op(X: TTensor, a: int = 3):
...
```
Result: Nearest match eligible.
Real example: `aten::div` vs `aten::div.Tensor_mode`.
Attributes:
onnxfunction: The OnnxFunction.
param_schema: The parameter schema defined in the OnnxFunction.
op_schema: The ONNX OpSchema.
type_constraints: The type constraints defined in the OpSchema.
attributes: The attributes defined in the OpSchema.
        _matching_score: The matching score of the OnnxSchemaChecker.
"""
def __init__(
self,
onnxfunction: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction,
):
"""Initialize the OnnxSchemaChecker .
Args:
onnxfunction: The OnnxFunction.
"""
self.onnxfunction = onnxfunction
self.param_schema = self.onnxfunction.param_schemas()
op_schema = self.onnxfunction.op_schema
        # Both `OnnxFunction` and `TracedOnnxFunction` never return None for `op_schema`.
        # However, their base class may, hence the return type is annotated as Optional[OpSchema].
assert op_schema is not None
self.op_schema = op_schema
self.type_constraints = {
# "T": {"tensor(int64)"}
constraint.type_param_str: set(constraint.allowed_type_strs)
for constraint in self.op_schema.type_constraints
}
self.attributes = self.op_schema.attributes
self._matching_score: int | None = None
@property
def match_score(self) -> int | None:
"""The matching score of the OnnxSchemaChecker .
If this remains None, it means the matching score has not been calculated,
and it's not a nearest match candidate.
Returns:
            The matching score of the OnnxSchemaChecker.
"""
return self._matching_score
def perfect_match_inputs(
self,
diagnostic: diagnostics.Diagnostic,
args: Sequence[
fx_type_utils.TensorLike | str | int | float | bool | list | complex | None
],
kwargs: dict[str, fx_type_utils.Argument],
) -> bool:
"""Check if the inputs perfectly match the OpSchema requirements.
The definition of perfect match is that the input types are all in the type
constraints and the number of inputs matches the number of inputs in the
OpSchema.
Checking steps:
1. The function signature matches the inputs number, and attribute names.
2. The input/attribute types are all in the type constraints.
A function should at least pass the first step to be eligible for the
nearest matching.
Args:
diagnostic: The diagnostic to use for logging detailed info.
args: The input arguments organized in PyTorch inputs way.
kwargs: The input keyword arguments organized in PyTorch inputs way.
Returns:
True if the inputs match the requirements, False otherwise.
"""
# NOTE: OnnxFunction does not have the same function signature as the original
# PyTorch operator. We need to separate the input/attributes from the arguments.
(
function_inputs,
function_attributes,
) = self._separate_input_attributes_from_arguments(
self.param_schema,
args,
kwargs,
fill_defaults=True, # fill defaults for optional arguments to match
)
with diagnostic.log_section(logging.INFO, "Checking perfect match..."):
diagnostic.info(
"%s",
diagnostics.LazyString(diagnostics.format_argument, self.onnxfunction),
)
# NOTE: 1. Check if the input number and attribute names match the
# OpSchema. If it's not, we know the function is not eligible to be a perfect
# match, nor a nearest match.
# We use is_perfect_match to postpone the return value to the end
# of the function, as we want to log all the mismatch info.
is_perfect_match = True
if len(function_inputs) != len(self.op_schema.inputs):
with diagnostic.log_section(
logging.INFO, "Failed: input number mismatch!"
):
diagnostic.info(
"Actual %d vs expected %d",
len(function_inputs),
len(self.op_schema.inputs),
)
diagnostic.info("The function is not a nearest match candidate.")
is_perfect_match = False
if set(function_attributes) != set(self.attributes):
with diagnostic.log_section(
logging.INFO, "Failed: attribute mismatch!"
):
diagnostic.info(
"%s",
diagnostics.LazyString(
lambda: f"Actual {set(function_attributes)} vs expected {set(self.attributes)}",
),
)
diagnostic.info("The function is not a nearest match candidate.")
is_perfect_match = False
# If it's already not a perfect match, we can return False directly. Further
# checking is only for the functions that are eligible for nearest match.
if not is_perfect_match:
return False
# NOTE: 2. The dtypes of inputs and attributes should be in the
# type constraints of the OpSchema. If they are not, we know the function is not
# eligible to be a perfect match, but can be a nearest match candidate.
for schema_input, torch_input in zip(
self.op_schema.inputs, function_inputs
):
torch_input_compatible_types = _find_onnx_data_type(torch_input)
allowed_types = self.type_constraints[schema_input.type_str]
if not allowed_types.intersection(
torch_input_compatible_types
) and not any(
fx_type_utils.is_optional_onnx_dtype_str(onnx_type_str)
for onnx_type_str in allowed_types
):
# If torch_input_compatible_types isn't in allowed_types
# of this input defined in the OpSchema, we know the function
# and the input are not compatible
with diagnostic.log_section(
logging.INFO,
"Failed: input type mismatch for input '%s'!",
schema_input.name,
):
diagnostic.info(
"Actual %s vs\nExpected %s",
torch_input_compatible_types,
allowed_types,
)
is_perfect_match = False
for attribute_name, attribute in function_attributes.items():
if not self._match_onnx_attribute_type(attribute_name, attribute):
# If the attribute type of the OpSchema and the attribute type don't match,
# we know the function and the input are not compatible
with diagnostic.log_section(
logging.INFO,
"Failed: attribute '%s' type mismatch!",
attribute_name,
):
diagnostic.info(
"Actual %s vs\nExpected %s",
type(attribute),
self.attributes[attribute_name].type,
)
is_perfect_match = False
# NOTE: This is still a candidate for nearest match, as it only mismatches attributes on dtype.
self._record_matching_score(function_inputs, function_attributes)
diagnostic.info("match score: %d", self.match_score)
return is_perfect_match
def _match_onnx_attribute_type(
self,
attribute_name: str,
attribute: fx_type_utils.Argument | onnxscript_graph_building.TorchScriptTensor,
is_sequence: bool = False,
) -> bool:
if isinstance(attribute, (int, float, bool, str)):
attribute_onnx_type = fx_type_utils.from_python_type_to_onnx_attribute_type(
type(attribute), is_sequence=is_sequence
)
if attribute_onnx_type != self.attributes[attribute_name].type:
return False
# If the attribute is an empty list, we don't know the type of the list
# so it's a mismatch
elif isinstance(attribute, (list, tuple)) and attribute:
return self._match_onnx_attribute_type(
attribute_name, attribute[0], is_sequence=True
)
else:
# NOTE: Unrecognized attribute type
return False
return True
def _record_matching_score(
self,
inputs: Sequence[
fx_type_utils.TensorLike | str | int | float | bool | list | complex | None
],
attributes: dict[str, fx_type_utils.Argument],
):
"""Calculate the inputs matching score of the OpSchema requirements to find the nearest match.
Only the functions which have the same number of inputs and attributes as the
OpSchema are eligible to be a nearest match candidate. Thus, we don't need to
check the length of inputs and attributes here, and only check the types of
inputs and attributes.
        How the matching score is calculated:
score += 1 if one input/attribute type is in the type constraints.
Limitations:
            None/NoneType/[] could result in zero matches and identical scores across overloads,
which will be recorded in SARIF.
Args:
inputs: The input arguments.
attributes: The input keyword arguments.
        Returns:
            None. The computed score is stored in ``self._matching_score``.
"""
self._matching_score = 0
        # Functions with a different number of arguments naturally score lower than
        # those with the same number of arguments.
for schema_input, torch_input in zip(self.op_schema.inputs, inputs):
torch_input_compatible_types = _find_onnx_data_type(torch_input)
allowed_types = self.type_constraints[schema_input.type_str]
if allowed_types.intersection(torch_input_compatible_types):
# If torch_input_compatible_types is in allowed_types
# of this input defined in the OpSchema, we know the function
# and the input are compatible
self._matching_score += 1
# NOTE: The penalty is applied to those functions which have different attributes.
for attribute_name, attribute_proto in self.attributes.items():
attribute = attributes[attribute_name]
attribute_onnx_type = fx_type_utils.from_python_type_to_onnx_attribute_type(
type(attribute)
)
if attribute_onnx_type != attribute_proto.type:
# If the attribute type of the OpSchema and the attribute type don't match,
# we know the function and the input are not compatible
self._matching_score -= 1
    # NOTE: Adapted from an onnxscript internal function. Importing that function
    # directly would make the code less robust, as it is not a public API.
def _separate_input_attributes_from_arguments(
self,
param_schemas: Sequence[onnxscript.values.ParamSchema],
args: Sequence[
fx_type_utils.TensorLike | str | int | float | bool | list | complex | None
],
kwargs: dict[str, fx_type_utils.Argument],
fill_defaults: bool = True,
) -> tuple[list[Any], dict[str, Any]]:
"""Separate Python args and kwargs into ONNX inputs and attributes.
        Extra kwargs are ignored if their values are None. For example, if the
OpSchema has an attribute "rounding_mode" and the caller provides
"rounding_mode=None", the attribute "rounding_mode" will not be included
in the returned attributes when the OnnxFunction signature doesn't have
"rounding_mode" as an attribute.
Args:
            param_schemas: The parameter schemas of an Op or an OnnxFunction.
args: The Python positional arguments supplied by the caller.
kwargs: The Python keyword arguments supplied by the caller.
fill_defaults: Whether to fill the default values for attributes.
Returns:
A tuple of two elements:
- A list of ONNX inputs.
                - A dictionary of ONNX attribute names and values.
Raises:
TypeError: When allow_extra_kwargs is False and there are unknown kwargs.
TypeError: When a required input is not provided.
"""
# args, kwargs and param_schemas should be all in order
# user may not specify all inputs or attributes
import onnx
onnx_inputs: list[Any] = []
onnx_attributes: dict[str, Any] = {}
# NOTE: We need to copy kwargs because we will mutate it
copy_kwargs = kwargs.copy()
for i, param in enumerate(param_schemas):
if param.is_variadic_input:
# Exhaust all remaining args
onnx_inputs.extend(args[i:])
args = []
continue
if i < len(args):
if param.is_input:
onnx_inputs.append(args[i])
else:
onnx_attributes[param.name] = args[i]
elif param.name in copy_kwargs:
if param.is_input:
# Move the input from kwargs to inputs
onnx_inputs.append(copy_kwargs[param.name])
copy_kwargs.pop(param.name)
else:
onnx_attributes[param.name] = copy_kwargs[param.name]
elif (
param.is_attribute
and self.attributes[param.name].default_value.type
!= onnx.AttributeProto.UNDEFINED # type: ignore[attr-defined]
):
# User did not provide the attribute
if fill_defaults:
onnx_attributes[param.name] = param.default
# optional input
elif param.is_input:
if fill_defaults:
onnx_inputs.append(None)
# NOTE: Pick up extra kwargs if it's not None. None is not expected
# as an attribute value in torchlib.
for k, v in copy_kwargs.items():
if k not in onnx_attributes and v is not None:
onnx_attributes[k] = v
return onnx_inputs, onnx_attributes
def _is_arg_with_complex_dtype(arg: fx_type_utils.Argument) -> bool:
"""Check if the node has complex dtype recursively."""
if (
isinstance(arg, torch.fx.Node)
and "val" in arg.meta
and isinstance(arg.meta["val"], torch.Tensor)
and torch.is_complex(arg.meta["val"])
):
return True
    elif isinstance(arg, list):
        # Check every item, not just the first; any complex element makes the
        # whole argument complex.
        return any(_is_arg_with_complex_dtype(item) for item in arg)
return False
def _find_onnx_data_type(
torch_input: fx_type_utils.TensorLike
| str
| int
| float
| bool
| list
| tuple
| complex
| None,
) -> set[str]:
"""Convert inputs data type from torch acceptable dtype to the compatible onnx dtype string."""
if (
isinstance(torch_input, fx_type_utils.TensorLike)
and torch_input.dtype is not None
):
return fx_type_utils.from_torch_dtype_to_onnx_dtype_str(torch_input.dtype)
if isinstance(torch_input, (int, float, bool, str, complex)):
return fx_type_utils.from_torch_dtype_to_onnx_dtype_str(type(torch_input))
if isinstance(torch_input, (list, tuple)) and torch_input: # [Tensor, Tensor]
the_first_non_none_item = next(
(item for item in torch_input if item is not None), None
)
set_dtype = _find_onnx_data_type(the_first_non_none_item)
if any(isinstance(input, fx_type_utils.TensorLike) for input in torch_input):
# NOTE: Any Tensor involved in a list would make it a seq(tensor(onnx_type))
return {f"seq({dtype})" for dtype in set_dtype}
else:
# constant list of non-tensor type
return set_dtype
if (
torch_input is None
or (
isinstance(torch_input, fx_type_utils.TensorLike)
and torch_input.dtype is None
)
or (isinstance(torch_input, (list, tuple)) and not torch_input)
):
# NOTE: None, No dtype, and empty list are edge cases, we allow it to be any type to relax the type check
# seq(tensor) also goes to here, as it is not supported in torchscript, and it would be None in this case.
return set()
raise RuntimeError(f"Unknown input type from input: {torch_input}")
```
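To make the nearest-match tie-breaker concrete: the sort key above prefers a higher match score, then custom over default functions, then later registration order. A standalone sketch of that rule, with a hypothetical stand-in for `registration.ONNXFunction`:
```py
from dataclasses import dataclass


@dataclass(frozen=True)
class Overload:
    # Hypothetical stand-in for registration.ONNXFunction.
    name: str
    is_custom: bool


registered = [
    Overload("default_a", False),
    Overload("custom_b", True),
    Overload("custom_c", True),
]
# All three tie on match score.
scores = {overload: 2 for overload in registered}

best = sorted(
    scores,
    key=lambda k: (scores[k], k.is_custom, registered.index(k)),
    reverse=True,
)[0]
assert best.name == "custom_c"  # custom beats default; last-registered wins ties
```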
|
==================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.56 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\passes\__init__.py
ENCODING: utf-8
```py
from .decomp import Decompose
from .functionalization import Functionalize, RemoveInputMutation
from .modularization import Modularize
from .readability import RestoreParameterAndBufferNames
from .type_promotion import InsertTypePromotion
from .virtualization import MovePlaceholderToFront, ReplaceGetAttrWithPlaceholder
__all__ = [
"Decompose",
"InsertTypePromotion",
"Functionalize",
"Modularize",
"MovePlaceholderToFront",
"RemoveInputMutation",
"RestoreParameterAndBufferNames",
"ReplaceGetAttrWithPlaceholder",
]
```
|
================================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 4.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\passes\_utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Common utility functions for FX passes.
These functions should NOT be directly invoked outside of `passes` package.
"""
from __future__ import annotations
import collections
import re
from typing import Callable
import torch.fx
import torch.fx.traceback as fx_traceback
def wrap_graph_module_for_node_meta_preservation(
graph_module: torch.fx.GraphModule,
) -> Callable:
"""Wrap a GraphModule with contexts to preserve node meta information, such as stacktrace info.
This is typically useful before calling `make_fx`. Without this wrapper, the
stacktrace information will be lost afterwards.
"""
def wrapped(*args):
with fx_traceback.preserve_node_meta():
return torch.fx.Interpreter(graph_module).run(*args)
return wrapped
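# Typical usage sketch (`gm` and `example_args` are hypothetical placeholders):
#   wrapped = wrap_graph_module_for_node_meta_preservation(gm)
#   new_gm = make_fx(wrapped)(*example_args)  # node.meta (e.g. stack traces) survives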
def _get_node_base_name(node_name: str) -> tuple[str, int | None]:
pattern = r"(.*)\.(\d+)"
match = re.match(pattern, node_name)
if match is not None:
base_name, count_str = match.groups()
return base_name, int(count_str)
return node_name, None
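# Illustrative behavior of the pattern above:
#   _get_node_base_name("conv.3") -> ("conv", 3)
#   _get_node_base_name("conv")   -> ("conv", None)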
def set_node_name(
node: torch.fx.Node,
new_name: str,
name_to_node_cache: dict[str, torch.fx.Node],
):
"""Safely set the unique name of a node.
If the new name is already taken by another node, the name of the other node will be
updated. If `new_name` is a string of format f"{base_name}.{count}", where `count`
is an integer, the other node will be renamed as f"{base_name}.{count+1}". If not,
the other node will be renamed as "{new_name}.1". This function will iteratively
update the names until there is no conflict.
``name_to_node_cache`` is required as an argument to avoid recomputation. The caller
is responsible for ensuring the cache is accurate and in sync with the owning module
of the node. The values in the cache will be updated accordingly.
Args:
node: The node to update.
new_name: The new name to use.
name_to_node_cache: A cache of node names to nodes.
"""
node_name_to_set = collections.deque([(node, new_name)])
while node_name_to_set:
node, new_name = node_name_to_set.pop()
if new_name in name_to_node_cache and name_to_node_cache[new_name] != node:
base_name, postfix_count = _get_node_base_name(new_name)
if postfix_count is None:
postfix_count = 0
node_name_to_set.append(
(name_to_node_cache[new_name], f"{base_name}.{postfix_count + 1}")
)
node.name = new_name
name_to_node_cache[new_name] = node
def replace_placeholder_name_and_target(
module: torch.fx.GraphModule, reference_module: torch.fx.GraphModule
):
"""Replace the argument names in module with those in reference_module.
This function assumes the two modules have the same signature structure.
The caller is responsible for ensuring this. Otherwise, the behavior of this
function is undefined. This function only does minimal sanity check that the two
modules have the same number of arguments.
Name conflicts between new names and existing node names in the graph are handled.
Check the documentation of :func:`set_node_name` for more details.
Raises:
RuntimeError: If the two modules have different number of arguments.
"""
placeholders = [node for node in module.graph.nodes if node.op == "placeholder"]
reference_placeholders = [
node for node in reference_module.graph.nodes if node.op == "placeholder"
]
if len(placeholders) != len(reference_placeholders):
raise RuntimeError(
"The two modules have different number of arguments. "
f"module: {len(placeholders)}, reference_module: {len(reference_placeholders)}"
)
name_to_node: dict[str, torch.fx.Node] = {}
for node in module.graph.nodes:
name_to_node[node.name] = node
for placeholder, reference_placeholder in zip(placeholders, reference_placeholders):
placeholder.target = reference_placeholder.target
set_node_name(placeholder, reference_placeholder.name, name_to_node)
module.recompile()
```
|
================================================================================================================================
SOURCE CODE FILE: decomp.py
LINES: 1
SIZE: 3.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\passes\decomp.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import contextlib
from typing import Callable, TYPE_CHECKING
import torch
import torch._ops
from torch._dispatch import python as python_dispatch
from torch._subclasses import fake_tensor
from torch.fx.experimental import proxy_tensor
from torch.onnx._internal.fx import _pass, diagnostics
from torch.onnx._internal.fx.passes import _utils
if TYPE_CHECKING:
from collections.abc import Mapping
import torch.fx
class Decompose(_pass.Transform):
def __init__(
self,
diagnostic_context: diagnostics.DiagnosticContext,
module: torch.fx.GraphModule,
decomposition_table: Mapping[torch._ops.OpOverload, Callable],
enable_dynamic_axes: bool,
allow_fake_constant: bool | None = False,
):
super().__init__(diagnostic_context, module)
self.decomposition_table = decomposition_table
self.enable_dynamic_axes = enable_dynamic_axes
self.allow_fake_constant = allow_fake_constant
def _run(self, *args, **kwargs) -> torch.fx.GraphModule:
assert not kwargs, "kwargs is not supported in Decompose."
# To preserve stack trace info after `make_fx`.
module = _utils.wrap_graph_module_for_node_meta_preservation(self.module)
        # Fake mode uses static sizes to trace the sizes of tensors, while symbolic
        # mode generates aten::sym_size to dynamically trace the sizes of tensors.
# e.g. fake mode:
# view: f32[3, 5, 20] = torch.ops.aten.view.default(x, [3, 5, 20])
# e.g. symbolic mode:
# sym_size = torch.ops.aten.sym_size(x, 0)
# sym_size_1 = torch.ops.aten.sym_size(x, 1)
# sym_size_2 = torch.ops.aten.sym_size(x, 2)
# sym_size_3 = torch.ops.aten.sym_size(x, 3)
# mul = sym_size_2 * sym_size_3; sym_size_2 = sym_size_3 = None
# view: f32[3, 5, 20] = torch.ops.aten.view.default(x, [sym_size, sym_size_1, mul])
# Mimic `torch._dynamo.export(aten_graph=True)` behavior in invoking `make_fx`.
# TODO: May need revisit for user fake mode export + dynamic shape scenario.
fake_mode: fake_tensor.FakeTensorMode | None = self.fake_mode
maybe_fake_args = self._maybe_fakefy_args(fake_mode, *args)
if fake_mode is not None:
# Using existing fake mode as context, signal `make_fx` that it does not need
# to create a new fake mode by passing tracing_mode as "real".
tracing_mode = "real"
else:
# Existing fake mode not found, signal `make_fx` to create one.
fake_mode = contextlib.nullcontext() # type: ignore[assignment]
tracing_mode = "symbolic" if self.enable_dynamic_axes else "fake"
# Apply decomposition table to the input graph.
assert fake_mode is not None # for mypy
with (
fake_tensor.unset_fake_temporarily(),
python_dispatch.enable_python_dispatcher(),
fake_mode,
):
decomposed_module = proxy_tensor.make_fx(
module,
decomposition_table=self.decomposition_table,
tracing_mode=tracing_mode,
_allow_non_fake_inputs=True,
_allow_fake_constant=bool(self.allow_fake_constant),
)(*maybe_fake_args)
        # Rename placeholder targets to match the original module's signature, since
        # we don't want to map forward(x, y, z) to forward(arg0, arg1, arg2).
_utils.replace_placeholder_name_and_target(decomposed_module, self.module)
return decomposed_module
```
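For context on what `Decompose` delegates to, here is a hedged standalone sketch of `make_fx` with a decomposition table; it uses the stock core-ATen table rather than the exporter's own `decomposition_table`, so treat it as an approximation:
```py
import torch
from torch._decomp import core_aten_decompositions
from torch.fx.experimental import proxy_tensor


def f(x):
    return torch.nn.functional.silu(x)


decomposed = proxy_tensor.make_fx(
    f,
    decomposition_table=core_aten_decompositions(),
    tracing_mode="fake",
)(torch.randn(2, 3))
# silu should now be lowered to primitives (e.g. sigmoid and mul) in the graph.
print(decomposed.graph)
```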
|
===========================================================================================================================================
SOURCE CODE FILE: functionalization.py
LINES: 1
SIZE: 6.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\onnx\_internal\fx\passes\functionalization.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from __future__ import annotations
import contextlib
from typing import Callable
import torch
import torch._ops
import torch.func
import torch.fx
from torch._subclasses import fake_tensor
from torch.fx.experimental import proxy_tensor
from torch.onnx._internal.fx import _pass, diagnostics
from torch.onnx._internal.fx.passes import _utils
from torch.utils import _pytree as pytree
class Functionalize(_pass.Transform):
"""Functionalize a GraphModule.
This pass utilizes ``functionalization`` utility of ``torch._functorch`` to convert
a GraphModule into a functional form. The two main functionalities are (copied from
its documentations):
* ``functionalization`` removes (intermediate) mutations and aliasing from a
function, while preserving the function's semantics.
* ``functionalization`` also removes mutations (and views) that were performed
on function inputs. However to preserve semantics, functionalize will "fix up" the
mutations after the transform has finished running, by detecting if any tensor inputs
"should have" been mutated, and copying the new data back to the inputs if necessary.
For example, consider::
def fn(a, b):
a.add_(b)
return a
For a call like `fn(x, y)`, the variable `x` outside is also mutated. Hence just
functionalizing is not enough for preserving the original semantics. A "special"
input mutation step needs to be inserted at the end.::
# After functionalization, without input mutation "fix up".
# This is not semantically the same. The variable outside the function call that
# was passed in as `a` is not mutated.
def fn(a, b):
new_a = a + b
return new_a
# Functionalization with input mutation "fix up" that preserves semantics.
def fn(a, b):
new_a = a + b
# Copying the new data back to the inputs
a.copy_(new_a)
return new_a
For ONNX inference, it is recommended to run ``RemoveInputMutation`` after this pass.
``RemoveInputMutation`` removes the "fix up" nodes that were added by ``Functionalize``,
which are not needed for ONNX inference.
"""
def __init__(
self,
diagnostic_context: diagnostics.DiagnosticContext,
module: torch.fx.GraphModule,
enable_dynamic_axes: bool,
allow_fake_constant: bool | None = False,
):
super().__init__(diagnostic_context, module)
self.enable_dynamic_axes = enable_dynamic_axes
self.allow_fake_constant = allow_fake_constant
def _functionalize(self, function: Callable) -> Callable:
# Working around a dispatcher issue with `torch.func.functionalize` when used
# together with `make_fx`.
# Ref: https://github.com/pytorch/pytorch/issues/99774#issuecomment-1527949391
def wrapped(*inputs):
inputs_functional = pytree.tree_map_only(
torch.Tensor, torch._to_functional_tensor, inputs
)
torch._enable_functionalization(reapply_views=True)
try:
out = function(*inputs_functional)
finally:
torch._disable_functionalization()
flat_inputs_functional = pytree.tree_leaves(inputs_functional)
for input_functional in flat_inputs_functional:
if isinstance(input_functional, torch.Tensor):
torch._sync(input_functional)
pytree.tree_map(torch._sync, out)
out_unwrapped = pytree.tree_map(torch._from_functional_tensor, out)
return out_unwrapped
return wrapped
def _run(self, *args) -> torch.fx.GraphModule:
# To preserve stack trace info after `make_fx`.
module = _utils.wrap_graph_module_for_node_meta_preservation(self.module)
functionalized_callable = self._functionalize(module)
# Mimic `torch._dynamo.export(aten_graph=True)` behavior in invoking `make_fx`.
# TODO: May need revisit for user fake mode export + dynamic shape scenario.
fake_mode: fake_tensor.FakeTensorMode | None = self.fake_mode
maybe_fake_args = self._maybe_fakefy_args(fake_mode, *args)
if fake_mode is not None:
# Using existing fake mode as context, signal `make_fx` that it does not need
# to create a new fake mode by passing tracing_mode as "real".
tracing_mode = "real"
else:
# Existing fake mode not found, signal `make_fx` to create one.
fake_mode = contextlib.nullcontext() # type: ignore[assignment]
tracing_mode = "symbolic" if self.enable_dynamic_axes else "fake"
assert fake_mode is not None # for mypy
with fake_tensor.unset_fake_temporarily(), fake_mode:
graph_module = proxy_tensor.make_fx(
functionalized_callable,
decomposition_table={},
tracing_mode=tracing_mode,
_allow_non_fake_inputs=True,
_allow_fake_constant=bool(self.allow_fake_constant),
)(*maybe_fake_args)
        # Rename placeholder targets to match the original module's signature, since
        # we don't want to map forward(x, y, z) to forward(arg0, arg1, arg2).
_utils.replace_placeholder_name_and_target(graph_module, self.module)
return graph_module
class RemoveInputMutation(_pass.Transform):
"""Remove `aten.copy_.default` nodes that mutate module inputs.
    This pass is recommended to be used after the ``Functionalize`` pass.
    The ``Functionalize`` pass adds `aten.copy_.default` nodes to the graph
    when it detects mutations to inputs. These nodes are not needed for ONNX
    inference export, although they could be useful for training.
"""
def _run(self, *args) -> torch.fx.GraphModule:
for node in reversed(self.module.graph.nodes):
if (
node.op == "call_function"
and node.target == torch.ops.aten.copy_.default
and len(node.users) == 0
and isinstance(node.args[0], torch.fx.Node)
and node.args[0].op == "placeholder"
):
self.module.graph.erase_node(node)
return self.module
```
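The `Functionalize` pass hand-rolls functionalization to work around the dispatcher issue referenced above; the public API it mimics is `torch.func.functionalize`. A sketch on the docstring's `a.add_(b)` example, assuming the standard `make_fx` + `functionalize` combination:
```py
import torch
from torch.fx.experimental import proxy_tensor


def fn(a, b):
    a.add_(b)  # in-place mutation on an input
    return a


gm = proxy_tensor.make_fx(
    torch.func.functionalize(fn, remove="mutations_and_views")
)(torch.randn(3), torch.randn(3))
# The traced graph uses out-of-place ops plus a trailing copy_ "fix up";
# RemoveInputMutation would strip that copy_ for inference-only export.
print(gm.graph)
```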
|