diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faae8ed76c6ee322517308ae8cdd872f41f5f2f5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/error.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d711d6d923472bf6deb7c4622a8a9ed44d56872 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/error.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/exported_program.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/exported_program.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ed0b42cba0b09b63c2b44afd0e9fc84ce0cae5f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/exported_program.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/non_strict_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/non_strict_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f2ebbe9adb8580ac3004e94fa059ed4e64d4b14 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/non_strict_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/pass_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/pass_base.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..85b2a6889aa015deba62c0e43538156cda3fb501 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/pass_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14accbb9c42a2b8c797ced4b7b262a886e29ba90 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/verifier.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/verifier.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80edb018612e90fe78842b771474310f73d5742f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/verifier.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/wrappers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/wrappers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cb77ddb752b8ebad7cdbf5e58205e337e788760 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/__pycache__/wrappers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10a55772ab58b21573a6eba0356ddd3080164ac7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/case.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/case.py new file mode 100644 index 0000000000000000000000000000000000000000..6c4c03572e3ab3c0c7ed9ff9f816ceac3b725051 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/case.py @@ -0,0 +1,188 @@ +import inspect +import re +import string +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Dict, List, Optional, Set, Tuple, Union +from types import ModuleType + +import torch + +_TAGS: Dict[str, Dict[str, Any]] = { + "torch": { + "cond": {}, + "dynamic-shape": {}, + "escape-hatch": {}, + "map": {}, + "dynamic-value": {}, + "operator": {}, + "mutation": {}, + }, + "python": { + "assert": {}, + "builtin": {}, + "closure": {}, + "context-manager": {}, + "control-flow": {}, + "data-structure": {}, + "standard-library": {}, + "object-model": {}, + }, +} + + +class SupportLevel(Enum): + """ + Indicates at what stage the feature + used in the example is handled in export. 
+ """ + + SUPPORTED = 1 + NOT_SUPPORTED_YET = 0 + + +class ExportArgs: + __slots__ = ("args", "kwargs") + + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + +InputsType = Union[Tuple[Any, ...], ExportArgs] + + +def check_inputs_type(x): + if not isinstance(x, (ExportArgs, tuple)): + raise ValueError( + f"Expecting inputs type to be either a tuple, or ExportArgs, got: {type(x)}" + ) + + +def _validate_tag(tag: str): + parts = tag.split(".") + t = _TAGS + for part in parts: + assert set(part) <= set( + string.ascii_lowercase + "-" + ), f"Tag contains invalid characters: {part}" + if part in t: + t = t[part] + else: + raise ValueError(f"Tag {tag} is not found in registered tags.") + + +@dataclass(frozen=True) +class ExportCase: + example_inputs: InputsType + description: str # A description of the use case. + model: torch.nn.Module + name: str + extra_inputs: Optional[InputsType] = None # For testing graph generalization. + # Tags associated with the use case. (e.g dynamic-shape, escape-hatch) + tags: Set[str] = field(default_factory=set) + support_level: SupportLevel = SupportLevel.SUPPORTED + dynamic_shapes: Optional[Dict[str, Any]] = None + + def __post_init__(self): + check_inputs_type(self.example_inputs) + if self.extra_inputs is not None: + check_inputs_type(self.extra_inputs) + + for tag in self.tags: + _validate_tag(tag) + + if not isinstance(self.description, str) or len(self.description) == 0: + raise ValueError(f'Invalid description: "{self.description}"') + + +_EXAMPLE_CASES: Dict[str, ExportCase] = {} +_MODULES: Set[ModuleType] = set() +_EXAMPLE_CONFLICT_CASES: Dict[str, List[ExportCase]] = {} +_EXAMPLE_REWRITE_CASES: Dict[str, List[ExportCase]] = {} + + +def register_db_case(case: ExportCase) -> None: + """ + Registers a user provided ExportCase into example bank. 
+ """ + if case.name in _EXAMPLE_CASES: + if case.name not in _EXAMPLE_CONFLICT_CASES: + _EXAMPLE_CONFLICT_CASES[case.name] = [_EXAMPLE_CASES[case.name]] + _EXAMPLE_CONFLICT_CASES[case.name].append(case) + return + + _EXAMPLE_CASES[case.name] = case + + +def to_snake_case(name): + name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower() + + +def _make_export_case(m, name, configs): + if not issubclass(m, torch.nn.Module): + raise TypeError("Export case class should be a torch.nn.Module.") + m = m() + + if "description" not in configs: + # Fallback to docstring if description is missing. + assert ( + m.__doc__ is not None + ), f"Could not find description or docstring for export case: {m}" + configs = {**configs, "description": m.__doc__} + return ExportCase(**{**configs, "model": m, "name": name}) + + +def export_case(**kwargs): + """ + Decorator for registering a user provided case into example bank. + """ + + def wrapper(m): + configs = kwargs + module = inspect.getmodule(m) + if module in _MODULES: + raise RuntimeError("export_case should only be used once per example file.") + + assert module is not None + _MODULES.add(module) + normalized_name = to_snake_case(m.__name__) + module_name = module.__name__.split(".")[-1] + if module_name != normalized_name: + raise RuntimeError( + f'Module name "{module.__name__}" is inconsistent with exported program ' + + f'name "{m.__name__}". Please rename the module to "{normalized_name}".' 
+ ) + + case = _make_export_case(m, module_name, configs) + register_db_case(case) + return case + + return wrapper + + +def export_rewrite_case(**kwargs): + def wrapper(m): + configs = kwargs + + parent = configs.pop("parent") + assert isinstance(parent, ExportCase) + key = parent.name + if key not in _EXAMPLE_REWRITE_CASES: + _EXAMPLE_REWRITE_CASES[key] = [] + + configs["example_inputs"] = parent.example_inputs + case = _make_export_case(m, to_snake_case(m.__name__), configs) + _EXAMPLE_REWRITE_CASES[key].append(case) + return case + + return wrapper + + +def normalize_inputs(x: InputsType) -> ExportArgs: + if isinstance(x, tuple): + return ExportArgs(*x) + + assert isinstance(x, ExportArgs) + return x diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de69e541af87384c0c3168cabd40dd9a71809e83 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/assume_constant_result.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/assume_constant_result.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c05427f35375e88a5d204ec40c462e4b79a22e80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/assume_constant_result.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/class_method.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/class_method.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4d3a65a498caa49243b78b353ada0e217e8d9d73 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/class_method.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_class_method.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_class_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46ce6818933d2f665ad3cb1aae7f56cb34349341 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_class_method.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nested_function.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nested_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42594e8bcc73e1b6302d889e5e0714a5e835c24c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_branch_nested_function.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_closed_over_variable.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_closed_over_variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1659651f93a9ec8402c94b81f06160b1021fbfd8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_closed_over_variable.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_operands.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_operands.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8496d6d6ae66027f62b13d2245543cb0a9c5c88 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_operands.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47006138ee3cd529405047c881ee9d81ab8650dd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/cond_predicate.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_size_example.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_size_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ab70665a26eda210a6bfc4d9cc8f8b3213b2022 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/constrain_as_size_example.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/decorator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/decorator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca4e0ed9baf35594f2ceb44b8c79f9f47e7a8d09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/decorator.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dictionary.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dictionary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28a29d869315beb77d49f1fcb2420c0300d0f43c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dictionary.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_assert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_assert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c20e68b4b6a885cb1cbe9df8d69ad82a6965be37 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_assert.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_constructor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_constructor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d737897159ec1cf81f407c8f1f1324a3fc6f4fb7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_constructor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_if_guard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_if_guard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fdc875205f318200c0a9124c0761e0f3598be32 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_if_guard.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_map.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_map.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d240903b5640fb46ae60e6164ab1f999719da39 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_map.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_round.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_round.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b1ef560035018fb0b4aceb6f7688004d751fe2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_round.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_slicing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_slicing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad6a65f9dac5e44764b380db0a850d4540c586d2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_slicing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f652d2229c74cf7740eee2478f3febf3cdf7f19a Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/dynamic_shape_view.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/fn_with_kwargs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/fn_with_kwargs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04b3ee23367219d3bdb122cef176f5eca7aae048 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/fn_with_kwargs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_unpack.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_unpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8449f6488edfb857cc801c106cdb379400421af8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/list_unpack.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/nested_function.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/nested_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c7e0cc55cb0a7c18a2497e83a621c0d4efb32f9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/nested_function.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/null_context_manager.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/null_context_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcbda245b2163162669ca42efac73c05c026cbe9 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/null_context_manager.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17bd58607ed3e6eaffb89df232de8996b6855eed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/pytree_flatten.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/specialized_attribute.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/specialized_attribute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c581719281ef87b2814e5b5dc994af4ec88f54d8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/specialized_attribute.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_for_loop.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_for_loop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac6cfbbc6b51c13f9d320d265712e322276c532e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_for_loop.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_if.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_if.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1282d4b88a99a1940e5223d810fb8302b2e2430c Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/static_if.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db815c1153df743652b167e0c94603b72dbaca82 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/tensor_setattr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/type_reflection_method.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/type_reflection_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..161579160933e34d5045d626597bb21f62440263 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/type_reflection_method.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/user_input_mutation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/user_input_mutation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b20a5df9f5bab252c5c3076c14777c4a352116e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/__pycache__/user_input_mutation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py new file mode 100644 index 0000000000000000000000000000000000000000..9c8aeadc45ae291f363bb4850b30bab4fb14214d --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py @@ -0,0 +1,26 @@ +import torch + +from torch._export.db.case import export_case + + +class MyAutogradFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x.clone() + + @staticmethod + def backward(ctx, grad_output): + return grad_output + 1 + + +@export_case( + example_inputs=(torch.randn(3, 2),), +) +class AutogradFunction(torch.nn.Module): + """ + TorchDynamo does not keep track of backward() on autograd functions. We recommend to + use `allow_in_graph` to mitigate this problem. + """ + + def forward(self, x): + return MyAutogradFunction.apply(x) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py new file mode 100644 index 0000000000000000000000000000000000000000..77c629559d21eb6390c00ce8143d773d16f5710f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py @@ -0,0 +1,24 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 4),), +) +class ClassMethod(torch.nn.Module): + """ + Class methods are inlined during tracing. 
+ """ + + @classmethod + def method(cls, x): + return x + 1 + + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(4, 2) + + def forward(self, x): + x = self.linear(x) + return self.method(x) * self.__class__.method(x) * type(self).method(x) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py new file mode 100644 index 0000000000000000000000000000000000000000..bd8a1db034256fd305ae8924254070ac212e9248 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py @@ -0,0 +1,44 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.ones(3),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondBranchNestedFunction(torch.nn.Module): + """ + The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules: + - both branches must take the same args, which must also match the branch args passed to cond. + - both branches must return a single tensor + - returned tensor must have the same tensor metadata, e.g. shape and dtype + - branch function can be free function, nested function, lambda, class methods + - branch function can not have closure variables + - no inplace mutations on inputs or global variables + + This example demonstrates using nested function in cond(). + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. 
+ """ + def __init__(self): + super().__init__() + + def forward(self, x): + def true_fn(x): + def inner_true_fn(y): + return x + y + + return inner_true_fn(x) + + def false_fn(x): + def inner_false_fn(y): + return x - y + + return inner_false_fn(x) + + return cond(x.shape[0] < 10, true_fn, false_fn, [x]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py new file mode 100644 index 0000000000000000000000000000000000000000..38905b57e31243e10e52193ab36a8503ba4991f4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py @@ -0,0 +1,63 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.ones(6),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondBranchNonlocalVariables(torch.nn.Module): + """ + The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules: + - both branches must take the same args, which must also match the branch args passed to cond. + - both branches must return a single tensor + - returned tensor must have the same tensor metadata, e.g. shape and dtype + - branch function can be free function, nested function, lambda, class methods + - branch function can not have closure variables + - no inplace mutations on inputs or global variables + + This example demonstrates how to rewrite code to avoid capturing closure variables in branch functions. + + The code below will not work because capturing closure variables is not supported. 
+ ``` + my_tensor_var = x + 100 + my_primitive_var = 3.14 + + def true_fn(y): + nonlocal my_tensor_var, my_primitive_var + return y + my_tensor_var + my_primitive_var + + def false_fn(y): + nonlocal my_tensor_var, my_primitive_var + return y - my_tensor_var - my_primitive_var + + return cond(x.shape[0] > 5, true_fn, false_fn, [x]) + ``` + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + my_tensor_var = x + 100 + my_primitive_var = 3.14 + + def true_fn(x, y, z): + return x + y + z + + def false_fn(x, y, z): + return x - y - z + + return cond( + x.shape[0] > 5, + true_fn, + false_fn, + [x, my_tensor_var, torch.tensor(my_primitive_var)], + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py new file mode 100644 index 0000000000000000000000000000000000000000..b201c5d679b8eab6e9a3a74705772acf3a9a5af8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.tensor(True), torch.ones(3, 2)), + tags={"torch.cond", "python.closure"}, +) +class CondClosedOverVariable(torch.nn.Module): + """ + torch.cond() supports branches closed over arbitrary variables. 
+ """ + + def forward(self, pred, x): + def true_fn(val): + return x * 2 + + def false_fn(val): + return x - 2 + + return cond(pred, true_fn, false_fn, [x + 1]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py new file mode 100644 index 0000000000000000000000000000000000000000..a05e584100c958a124f9cfc59c489b417f5d3214 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py @@ -0,0 +1,39 @@ +import torch + +from torch._export.db.case import export_case +from torch.export import Dim +from functorch.experimental.control_flow import cond + +x = torch.randn(3, 2) +y = torch.ones(2) +dim0_x = Dim("dim0_x") + +@export_case( + example_inputs=(x, y), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, + extra_inputs=(torch.randn(2, 2), torch.ones(2)), + dynamic_shapes={"x": {0: dim0_x}, "y": None}, +) +class CondOperands(torch.nn.Module): + """ + The operands passed to cond() must be: + - a list of tensors + - match arguments of `true_fn` and `false_fn` + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x, y): + def true_fn(x, y): + return x + y + + def false_fn(x, y): + return x - y + + return cond(x.shape[0] > 2, true_fn, false_fn, [x, y]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py new file mode 100644 index 0000000000000000000000000000000000000000..fd02e2484c54678712593f7c9fa28344e5574375 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py @@ -0,0 +1,29 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.ones(6, 4, 3),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondPredicate(torch.nn.Module): + """ + The conditional statement (aka predicate) passed to cond() must be one of the following: + - torch.Tensor with a single element + - boolean expression + + NOTE: If the `pred` is test on a dim with batch size < 2, it will be specialized. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + pred = x.dim() > 2 and x.shape[2] > 10 + + return cond(pred, lambda x: x.cos(), lambda y: y.sin(), [x]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py new file mode 100644 index 0000000000000000000000000000000000000000..1af4b22dc988816c011aa2eb085f97c9850d257a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py @@ -0,0 +1,27 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.tensor(4),), + tags={ + "torch.dynamic-value", + "torch.escape-hatch", + }, +) +class ConstrainAsSizeExample(torch.nn.Module): + """ + If the value is not known at tracing time, you can provide hint so that we + can trace further. Please look at constrain_as_value and constrain_as_size APIs + constrain_as_size is used for values that NEED to be used for constructing + tensor. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + a = x.item() + torch._constrain_as_size(a, min=0, max=5) + return torch.ones((a, 5)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..39eff84af34812e1a31006c698652ec6dc2bbd20 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py @@ -0,0 +1,26 @@ +import functools + +import torch + +from torch._export.db.case import export_case + + +def test_decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + 1 + + return wrapper + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.ones(3, 2)), +) +class Decorator(torch.nn.Module): + """ + Decorators calls are inlined into the exported function during tracing. + """ + + @test_decorator + def forward(self, x, y): + return x + y diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py new file mode 100644 index 0000000000000000000000000000000000000000..382b444d7f8a285e85c4f5530f01972918a6d96f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py @@ -0,0 +1,21 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.tensor(4)), + tags={"python.data-structure"}, +) +class Dictionary(torch.nn.Module): + """ + Dictionary structures are inlined and flattened along tracing. 
+ """ + def __init__(self): + super().__init__() + + def forward(self, x, y): + elements = {} + elements["x2"] = x * x + y = y * elements["x2"] + return {"y": y} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py new file mode 100644 index 0000000000000000000000000000000000000000..ec95df0bd97dda4e673e7898a1072db8215f8310 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py @@ -0,0 +1,22 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.assert"}, +) +class DynamicShapeAssert(torch.nn.Module): + """ + A basic usage of python assertion. + """ + def __init__(self): + super().__init__() + + def forward(self, x): + # assertion with error message + assert x.shape[0] > 2, f"{x.shape[0]} is greater than 2" + # assertion without error message + assert x.shape[0] > 1 + return x diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py new file mode 100644 index 0000000000000000000000000000000000000000..45c8d36bee1fa7ed0102809a6871fbfa76628696 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py @@ -0,0 +1,21 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2, 2),), + tags={"torch.dynamic-shape", "python.control-flow"}, +) +class DynamicShapeIfGuard(torch.nn.Module): + """ + `if` statement with backed dynamic shape predicate will be specialized into + one particular branch and generate a guard. However, export will fail if the + the dimension is marked as dynamic shape from higher level API. 
+ """ + + def forward(self, x): + if x.shape[0] == 3: + return x.cos() + + return x.sin() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_map.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_map.py new file mode 100644 index 0000000000000000000000000000000000000000..5be0003fd170abb49afc80544229177d4b8b8de4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_map.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import map + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.ones(2)), + tags={"torch.dynamic-shape", "torch.map"}, +) +class DynamicShapeMap(torch.nn.Module): + """ + functorch map() maps a function over the first tensor dimension. + """ + + def __init__(self): + super().__init__() + + def forward(self, xs, y): + def body(x, y): + return x + y + + return map(body, xs, y) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py new file mode 100644 index 0000000000000000000000000000000000000000..7d6a50320f5baba5843e6e4831789c1993b5e6ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py @@ -0,0 +1,24 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel +from torch.export import Dim + +x = torch.ones(3, 2) +dim0_x = Dim("dim0_x") + +@export_case( + example_inputs=(x,), + tags={"torch.dynamic-shape", "python.builtin"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, + dynamic_shapes={"x": {0: dim0_x}}, +) +class DynamicShapeRound(torch.nn.Module): + """ + Calling round on dynamic shapes is not supported. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + return x[: round(x.shape[0] / 2)] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py new file mode 100644 index 0000000000000000000000000000000000000000..a5bd7fbd8edf523d4d6d11250bc9f8c8986653fd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py @@ -0,0 +1,27 @@ +from typing import List + +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=([torch.ones(3, 2), torch.tensor(4), torch.tensor(5)],), + tags={"python.control-flow", "python.data-structure"}, +) +class ListUnpack(torch.nn.Module): + """ + Lists are treated as static construct, therefore unpacking should be + erased after tracing. + """ + + def __init__(self): + super().__init__() + + def forward(self, args: List[torch.Tensor]): + """ + Lists are treated as static construct, therefore unpacking should be + erased after tracing. + """ + x, *y = args + return x + y[0] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/nested_function.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/nested_function.py new file mode 100644 index 0000000000000000000000000000000000000000..58b946f94a0c28447501a1d1a1fd4c98405d49d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/nested_function.py @@ -0,0 +1,27 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2), torch.ones(2)), + tags={"python.closure"}, +) +class NestedFunction(torch.nn.Module): + """ + Nested functions are traced through. Side effects on global captures + are not supported though. 
+ """ + def __init__(self): + super().__init__() + + def forward(self, a, b): + x = a + b + z = a - b + + def closure(y): + nonlocal x + x += 1 + return x * y + z + + return closure(x) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..1689537db833a90bf09122221dde47aad79ebf34 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py @@ -0,0 +1,26 @@ +import contextlib + +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.context-manager"}, +) +class NullContextManager(torch.nn.Module): + """ + Null context manager in Python will be traced out. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + """ + Null context manager in Python will be traced out. 
+ """ + ctx = contextlib.nullcontext() + with ctx: + return x.sin() + x.cos() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py new file mode 100644 index 0000000000000000000000000000000000000000..4a06207b6eaf8f24d673c7ec227c3a5643c2d6a3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py @@ -0,0 +1,19 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.randn(2, 3),), + tags={"python.object-model"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, +) +class OptionalInput(torch.nn.Module): + """ + Tracing through optional input is not supported yet + """ + + def forward(self, x, y=torch.ones(2, 3)): + if y is not None: + return x + y + return x diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py new file mode 100644 index 0000000000000000000000000000000000000000..0d799b2a609acc2b626e70f5c9beb131784f4e6b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py @@ -0,0 +1,20 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel +from torch.utils import _pytree as pytree + + +@export_case( + example_inputs=({1: torch.randn(3, 2), 2: torch.randn(3, 2)},), + support_level=SupportLevel.SUPPORTED, +) +class PytreeFlatten(torch.nn.Module): + """ + Pytree from PyTorch can be captured by TorchDynamo. 
+ """ + def __init__(self): + super().__init__() + + def forward(self, x): + y, spec = pytree.tree_flatten(x) + return y[0] + 1 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fc2b0ec36a5f9296aceb3146be74f07d5e5ac2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case +from torch.export import Dim + +x = torch.ones(3, 2) +dim1_x = Dim("dim1_x") + +@export_case( + example_inputs=(x,), + tags={"torch.dynamic-shape"}, + dynamic_shapes={"x": {1: dim1_x}}, +) +class ScalarOutput(torch.nn.Module): + """ + Returning scalar values from the graph is supported, in addition to Tensor + outputs. Symbolic shapes are captured and rank is specialized. + """ + def __init__(self): + super().__init__() + + def forward(self, x): + return x.shape[1] + 1 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py new file mode 100644 index 0000000000000000000000000000000000000000..c258e430f7ea0fa4a5b58ef9d6988e936fbb0f3f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2, 2),), + tags={"python.control-flow"}, +) +class StaticIf(torch.nn.Module): + """ + `if` statement with static predicate value should be traced through with the + taken branch. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + if len(x.shape) == 3: + return x + torch.ones(1, 1, 1) + + return x diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py new file mode 100644 index 0000000000000000000000000000000000000000..fae18fb1cf934bf1a9437b70578d58cf10130a4e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py @@ -0,0 +1,17 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.randn(3, 2), "attr"), + tags={"python.builtin"}, + support_level=SupportLevel.SUPPORTED, +) +class TensorSetattr(torch.nn.Module): + """ + setattr() call onto tensors is not supported. + """ + def forward(self, x, attr): + setattr(x, attr, torch.randn(3, 2)) + return x + 4 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py new file mode 100644 index 0000000000000000000000000000000000000000..a0d78703e2d5ff96c15bd5b772fea10e044ffbfc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py @@ -0,0 +1,41 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel, export_rewrite_case + + +class A: + @classmethod + def func(cls, x): + return 1 + x + + +@export_case( + example_inputs=(torch.ones(3, 4),), + tags={"python.builtin"}, + support_level=SupportLevel.SUPPORTED, +) +class TypeReflectionMethod(torch.nn.Module): + """ + type() calls on custom objects followed by attribute accesses are not allowed + due to its overly dynamic nature. 
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + a = A() + return type(a).func(x) + + +@export_rewrite_case(parent=TypeReflectionMethod) +class TypeReflectionMethodRewrite(torch.nn.Module): + """ + Custom object class methods will be inlined. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + return A.func(x) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..2bb16cd64a56fce4c4ccfdbb257f32f11514439c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py @@ -0,0 +1,18 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.mutation"}, + support_level=SupportLevel.SUPPORTED, +) +class UserInputMutation(torch.nn.Module): + """ + Directly mutate user input in forward + """ + + def forward(self, x): + x.mul_(2) + return x.cos() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/gen_example.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/gen_example.py new file mode 100644 index 0000000000000000000000000000000000000000..301cf42beb062dd5ad9763507417de57fcc6e48d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/gen_example.py @@ -0,0 +1,28 @@ +import os +import sys + +import torch._export.db.examples as examples + +TEMPLATE = '''import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.randn(3, 2),), + tags={{}}, +) +def {case_name}(x): + """ + """ + + return +''' + +if __name__ == "__main__": + assert len(sys.argv) == 2 + root_dir = examples.__name__.replace(".", "/") + assert os.path.exists(root_dir) + with open(os.path.join(root_dir, sys.argv[1] + 
".py"), "w") as f: + print("Writing to", f.name, "...") + f.write(TEMPLATE.format(case_name=sys.argv[1])) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/db/logging.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..fc412b8c5082dd8c4346711314fc7cc43c1a9ba2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/db/logging.py @@ -0,0 +1,2 @@ +def exportdb_error_message(case_name: str): + return "" diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff6751497ce61db3f10ad7b07f9ff63d77174d09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema_check.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema_check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74bfe7f593be2a915c2f5f7c3b379a2273addc94 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema_check.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/serialize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/serialize.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..50da8a2bb5495cc75262ef20652a5ee98e75d600 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/serialize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/upgrade.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/upgrade.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a71d1de9ee018ca6ddc94824ff00a180e8cd82b4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/__pycache__/upgrade.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/schema.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..0d6bebb71f3f854c9571e6f7507b5a715400f3c6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/schema.py @@ -0,0 +1,346 @@ +# NOTE: This is a placeholder for iterating on export serialization schema design. +# Anything is subject to change and no guarantee is provided at this point. 
+ +from dataclasses import dataclass, field +from enum import IntEnum +from typing import Dict, List, Optional, Tuple + +from torch._export.serde.union import _Union + +# NOTE: Please update this value if any modifications are made to the schema +SCHEMA_VERSION = (5, 1) +TREESPEC_VERSION = 1 + + +class ScalarType(IntEnum): + UNKNOWN = 0 + BYTE = 1 + CHAR = 2 + SHORT = 3 + INT = 4 + LONG = 5 + HALF = 6 + FLOAT = 7 + DOUBLE = 8 + COMPLEXHALF = 9 + COMPLEXFLOAT = 10 + COMPLEXDOUBLE = 11 + BOOL = 12 + BFLOAT16 = 13 + + +class Layout(IntEnum): + Unknown = 0 + SparseCoo = 1 + SparseCsr = 2 + SparseCsc = 3 + SparseBsr = 4 + SparseBsc = 5 + _mkldnn = 6 + Strided = 7 + + +class MemoryFormat(IntEnum): + Unknown = 0 + ContiguousFormat = 1 + ChannelsLast = 2 + ChannelsLast3d = 3 + PreserveFormat = 4 + + +@dataclass +class Device: + type: str + index: Optional[int] = None + + +@dataclass(repr=False) +class SymExprHint(_Union): + as_int: int + as_float: float + as_bool: bool + + +# This is for storing the symbolic expressions behind symints/symfloats/symbools +# For example, we can get something like +# SymExpr(expr_str="s0 + s1", hint=SymExprHint(as_int=4) +# if we also have the hint that s0 and s1 are both 2. +@dataclass +class SymExpr: + expr_str: str + hint: Optional[SymExprHint] = None + + +@dataclass(repr=False) +class SymInt(_Union): + as_expr: SymExpr + as_int: int + + +@dataclass(repr=False) +class SymBool(_Union): + as_expr: SymExpr + as_bool: bool + + +@dataclass +class TensorMeta: + dtype: ScalarType + sizes: List[SymInt] + requires_grad: bool + device: Device + strides: List[SymInt] + storage_offset: SymInt + layout: Layout + + +# In most cases we will use the "as_name" field to store arguments which are +# SymInts. +# The "as_int" field is used in the case where we have a list containing a mix +# of SymInt and ints (ex. [1, s0, ...]). 
We will serialize this type of list to +# be List[SymIntArgument] and map the SymInts to the "as_name" field, and ints +# to the "as_int" field. +@dataclass(repr=False) +class SymIntArgument(_Union): + as_name: str + as_int: int + + +# In most cases we will use the "as_name" field to store arguments which are +# SymBools. +# The "as_bool" field is used in the case where we have a list containing a mix +# of SymBool and bools (ex. [True, i0, ...]). We will serialize this type of list to +# be List[SymboolArgument] and map the SymBools to the "as_name" field, and bools +# to the "as_bool" field. +@dataclass(repr=False) +class SymBoolArgument(_Union): + as_name: str + as_bool: bool + + +@dataclass +class TensorArgument: + name: str + + +# This is use for storing the contents of a list which contain optional tensors +# (Tensor?[], ex. [Tensor, None, ...]), where the list will be serialized to the +# type List[OptionalTensorArgument], with tensor values seiralized to the +# "as_tensor" field, and None values serialized to the "as_none" field. 
+@dataclass(repr=False) +class OptionalTensorArgument(_Union): + as_tensor: str + as_none: Tuple[()] + + +@dataclass +class GraphArgument: + name: str + graph: 'Graph' + + +@dataclass +class CustomObjArgument: + name: str + class_fqn: str + + +# This is actually a union type +@dataclass(repr=False) +class Argument(_Union): + as_none: Tuple[()] + as_tensor: TensorArgument + as_tensors: List[TensorArgument] + as_int: int + as_ints: List[int] + as_float: float + as_floats: List[float] + as_string: str + as_strings: List[str] + as_sym_int: SymIntArgument + as_sym_ints: List[SymIntArgument] + as_scalar_type: ScalarType + as_memory_format: MemoryFormat + as_layout: Layout + as_device: Device + as_bool: bool + as_bools: List[bool] + as_sym_bool: SymBoolArgument + as_sym_bools: List[SymBoolArgument] + as_graph: GraphArgument + as_optional_tensors: List[OptionalTensorArgument] + as_custom_obj: CustomObjArgument + as_operator: str + + +@dataclass +class NamedArgument: + # Argument name from the operator schema + name: str + arg: Argument + + +@dataclass +class Node: + target: str + inputs: List[NamedArgument] + outputs: List[Argument] + metadata: Dict[str, str] + + +@dataclass +class Graph: + inputs: List[Argument] + outputs: List[Argument] + nodes: List[Node] + tensor_values: Dict[str, TensorMeta] + sym_int_values: Dict[str, SymInt] + sym_bool_values: Dict[str, SymBool] + # This is for deserializing the submodule graphs from higher order ops + # (ex. cond, map) where single tensor returns will just return a single + # tensor, rather than following export schema and returning a singleton + # list. 
+ is_single_tensor_return: bool = False + custom_obj_values: Dict[str, CustomObjArgument] = field(default_factory=dict) + + +@dataclass +class UserInputSpec: + # Actually, only tensors and SymInts are allowed here + arg: Argument + + +@dataclass +class InputToParameterSpec: + arg: TensorArgument + parameter_name: str + + +@dataclass +class InputToBufferSpec: + arg: TensorArgument + buffer_name: str + persistent: bool + + + +@dataclass +class InputToTensorConstantSpec: + arg: TensorArgument + tensor_constant_name: str + + +@dataclass +class InputToCustomObjSpec: + arg: CustomObjArgument + custom_obj_name: str + + +@dataclass(repr=False) +class InputSpec(_Union): + user_input: UserInputSpec + parameter: InputToParameterSpec + buffer: InputToBufferSpec + tensor_constant: InputToTensorConstantSpec + custom_obj: InputToCustomObjSpec + + +@dataclass +class UserOutputSpec: + arg: Argument + + +@dataclass +class LossOutputSpec: + arg: TensorArgument + + +@dataclass +class BufferMutationSpec: + arg: TensorArgument + buffer_name: str + + +@dataclass +class GradientToParameterSpec: + arg: TensorArgument + parameter_name: str + + +@dataclass +class GradientToUserInputSpec: + arg: TensorArgument + user_input_name: str + + +@dataclass +class UserInputMutationSpec: + arg: TensorArgument + user_input_name: str + + +@dataclass(repr=False) +class OutputSpec(_Union): + user_output: UserOutputSpec + loss_output: LossOutputSpec + buffer_mutation: BufferMutationSpec + gradient_to_parameter: GradientToParameterSpec + gradient_to_user_input: GradientToUserInputSpec + user_input_mutation: UserInputMutationSpec + + +@dataclass +class GraphSignature: + input_specs: List[InputSpec] + output_specs: List[OutputSpec] + + +@dataclass +class RangeConstraint: + min_val: int + max_val: int + + +@dataclass +class ModuleCallSignature: + inputs: List[Argument] + outputs: List[Argument] + + # These are serialized by calling pytree.treespec_loads + # And deserialized by calling pytree.treespec_dumps + 
in_spec: str + out_spec: str + + +@dataclass +class ModuleCallEntry: + fqn: str + signature: Optional[ModuleCallSignature] = None + + +@dataclass +class GraphModule: + graph: Graph + signature: GraphSignature + # This is used for unflattening, by tracking the calling structure of all of + # the modules in order to unflatten the modules back to the eager calling + # conventions. + module_call_graph: List[ModuleCallEntry] + + +# Invariant: Every time a change is made to the schema, one of the versions +# should be upadted. +@dataclass +class SchemaVersion: + major: int # Major version number is bumped every time a breaking change is made. + minor: int # Minor version number is bumped when a compatible change is made. + + +@dataclass +class ExportedProgram: + graph_module: GraphModule + # Key is the opset namespace (ex. aten), and value is the version number + opset_version: Dict[str, int] + range_constraints: Dict[str, RangeConstraint] + schema_version: SchemaVersion + dialect: str diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/schema_check.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/schema_check.py new file mode 100644 index 0000000000000000000000000000000000000000..cde4cf1ada271ca19800f2480a9f8c203286a340 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/schema_check.py @@ -0,0 +1,285 @@ +import dataclasses +import hashlib +import re +import typing +from enum import IntEnum +from typing import Any, Dict, Optional, Union + +from torch._export.serde import schema +from torch._export.serde.union import _Union + + +class SchemaUpdateError(Exception): + pass + + +def _check(x, msg): + if not x: + raise SchemaUpdateError(msg) + + +def _staged_schema(): + ret: Dict[str, Any] = {} + defs = {} + + def _handle_aggregate(ty): + def dump_type(t): + if isinstance(t, type): + return t.__name__ + elif isinstance(t, str): + assert t in defs + return t + elif o := typing.get_origin(t): + # Lemme know if 
there's a better way to do this. + if o == list: + head = "List" + elif o == dict: + head = "Dict" + elif o == tuple: + if typing.get_args(t) == (): + return "Tuple[()]" + head = "Tuple" + elif o == Union: + args = typing.get_args(t) + assert len(args) == 2 and args[1] == type(None) + return f"Optional[{dump_type(args[0])}]" + else: + raise AssertionError(f"Type {t} is not supported in export schema.") + return ( + f"{head}[{', '.join([dump_type(x) for x in typing.get_args(t)])}]" + ) + elif t == (): + return "()" + else: + raise AssertionError(f"Type {t} is not supported in export schema.") + + def dump_field(f): + t = dump_type(f.type) + ret = {"type": t} + + value = dataclasses.MISSING + if f.default is not dataclasses.MISSING: + value = f.default + elif f.default_factory is not dataclasses.MISSING: + value = f.default_factory() + + if t.startswith("Optional[") and value is not None: + raise AssertionError( + f"Optional field {ty.__name__}.{f.name} must have default value to be None." 
+ ) + + if value is not dataclasses.MISSING: + default = str(value) + ret["default"] = default + return ret + + return {f.name: dump_field(f) for f in dataclasses.fields(ty)} + + def _handle_int_enum(name, ty): + ret[name] = {"kind": "enum", "fields": {x.name: x.value for x in ty}} + + def _handle_struct(name, ty): + ret[name] = {"kind": "struct", "fields": _handle_aggregate(ty)} + + def _handle_union(name, ty): + ret[name] = {"kind": "union", "fields": _handle_aggregate(ty)} + + for name in dir(schema): + if name.startswith("_"): + continue + + value = getattr(schema, name) + + if hasattr(value, "__module__") and value.__module__ != schema.__name__: + continue + + defs[name] = value + + for name, value in defs.items(): + if isinstance(value, type): + if issubclass(value, IntEnum): + _handle_int_enum(name, value) + elif dataclasses.is_dataclass(value): + if issubclass(value, _Union): + _handle_union(name, value) + else: + _handle_struct(name, value) + else: + raise AssertionError(f"Unknown schema type {name}: {value}") + elif isinstance(value, (int, tuple)): + assert name in ("SCHEMA_VERSION", "TREESPEC_VERSION") + else: + raise AssertionError(f"Unknown variable {name}: {value}") + + ret["SCHEMA_VERSION"] = list(defs["SCHEMA_VERSION"]) + assert all(x > 0 for x in ret["SCHEMA_VERSION"]) + ret["TREESPEC_VERSION"] = defs["TREESPEC_VERSION"] + assert ret["TREESPEC_VERSION"] > 0 + return ret + + +def _diff_schema(dst, src): + additions = {key: src[key] for key in src.keys() - dst.keys()} + subtractions = {key: dst[key] for key in dst.keys() - src.keys()} + + common_keys = src.keys() & dst.keys() + + versions = {"SCHEMA_VERSION", "TREESPEC_VERSION"} + common_keys -= versions + + for key in common_keys: + src_kind = src[key]["kind"] + src_fields = src[key]["fields"] + dst_kind = dst[key]["kind"] + dst_fields = dst[key]["fields"] + _check( + src_kind == dst_kind, + f"Type {key} changed kind from {dst_kind} to {src_kind}", + ) + assert isinstance(src_fields, dict) and 
isinstance(dst_fields, dict) + added_fields = { + key: src_fields[key] for key in src_fields.keys() - dst_fields.keys() + } + subtracted_fields = { + key: dst_fields[key] for key in dst_fields.keys() - src_fields.keys() + } + common_fields = src_fields.keys() & dst_fields.keys() + + for field in common_fields: + src_field = src_fields[field] + dst_field = dst_fields[field] + if src_kind == "struct": + _check( + src_field["type"] == dst_field["type"], + f"Type of the field {key}.{field} changed from {dst_field['type']} to {src_field['type']}", + ) + if "default" in src_field and "default" not in dst_field: + added_fields[field] = {} + added_fields[field]["default"] = src_field["default"] + if "default" not in src_field and "default" in dst_field: + subtracted_fields[field] = {} + subtracted_fields[field]["default"] = dst_field["default"] + elif src_kind == "enum": + _check( + src_field == dst_field, + f"Value of the enum field {key}.{field} changed from {dst_field} to {src_field}", + ) + elif src_kind == "union": + _check( + src_field["type"] == dst_field["type"], + f"Type of the field {key}.{field} changed from {dst_field['type']} to {src_field['type']}", + ) + else: + raise AssertionError(f"Unknown kind {src_kind}: {key}") + if len(added_fields) > 0: + assert key not in additions + additions[key] = {} + additions[key]["fields"] = added_fields + if len(subtracted_fields) > 0: + assert key not in subtractions + subtractions[key] = {} + subtractions[key]["fields"] = subtracted_fields + + return additions, subtractions + + +def _hash_schema(s): + return hashlib.sha256(repr(s).encode("utf-8")).hexdigest() + + +@dataclasses.dataclass +class _Commit: + result: Dict[str, Any] + checksum_result: str + path: str + additions: Dict[str, Any] + subtractions: Dict[str, Any] + base: Dict[str, Any] + checksum_base: Optional[str] + + +def update_schema(): + import importlib.resources + + if importlib.resources.is_resource(__package__, "schema.yaml"): + content = 
importlib.resources.read_text(__package__, "schema.yaml") + match = re.search("checksum<<([A-Fa-f0-9]{64})>>", content) + _check(match is not None, "checksum not found in schema.yaml") + assert match is not None + checksum_base = match.group(1) + from yaml import load, Loader + + dst = load(content, Loader=Loader) + assert isinstance(dst, dict) + else: + checksum_base = None + dst = {"SCHEMA_VERSION": None, "TREESPEC_VERSION": None} + + src = _staged_schema() + additions, subtractions = _diff_schema(dst, src) + return _Commit( + result=src, + checksum_result=_hash_schema(src), + path=__package__.replace(".", "/") + "/schema.yaml", + additions=additions, + subtractions=subtractions, + base=dst, + checksum_base=checksum_base, + ) + + +def check(commit: _Commit, force_unsafe: bool = False): + next_version = None + reason = "" + # Step 1: Detect major schema updates. + if len(commit.additions) > 0: + for k, v in commit.additions.items(): + if k not in commit.base: + continue + kind = commit.result[k]["kind"] + fields = v["fields"] + for f, d in fields.items(): + if "default" not in d and kind == "struct": + reason += ( + f"Field {k}.{f} is added to schema.py without a default value as an incomparible change " + + "which requires major version bump.\n" + ) + next_version = [commit.base["SCHEMA_VERSION"][0] + 1, 1] + + if len(commit.subtractions) > 0: + for k, v in commit.subtractions.items(): + if k not in commit.result: + continue + for f in v["fields"]: + reason = f"Field {k}.{f} is removed from schema.py as an incompatible change which requires major version bump.\n" + next_version = [commit.base["SCHEMA_VERSION"][0] + 1, 1] + + if force_unsafe: + reason += "--force-unsafe is used." + next_version = commit.result["SCHEMA_VERSION"] + else: + # Step 2: Detect minor schema updates. 
+ if next_version is None and len(commit.additions) > 0: + for k, v in commit.additions.items(): + for f in v["fields"]: + reason += ( + f"Field {k}.{f} is added to schema.py as an compatible change " + + "which still requires minor version bump.\n" + ) + next_version = [ + commit.base["SCHEMA_VERSION"][0], + commit.base["SCHEMA_VERSION"][1] + 1, + ] + if next_version is None and len(commit.subtractions) > 0: + for k, v in commit.subtractions.items(): + for f in v["fields"]: + reason += ( + f"Field {k}.{f} is removed from schema.py as an compatible change " + + "which still requires minor version bump.\n" + ) + next_version = [ + commit.base["SCHEMA_VERSION"][0], + commit.base["SCHEMA_VERSION"][1] + 1, + ] + + return next_version, reason diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/serialize.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/serialize.py new file mode 100644 index 0000000000000000000000000000000000000000..01625ec63c327df1f0986680d2d5fe349f211b0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/serialize.py @@ -0,0 +1,2434 @@ +import base64 +import copy +import dataclasses +import heapq +import inspect +import io +import json +import logging +import math +import operator +import typing +import copyreg + +from contextlib import contextmanager +from dataclasses import dataclass, field +from enum import Enum +from typing import ( + Any, + Callable, + cast, + Dict, + Iterator, + List, + Optional, + Set, + Tuple, + Union, +) + +import sympy + +import torch +import torch.export.exported_program as ep +from torch._export.serde.schema import SchemaVersion +from torch._export.verifier import load_verifier +from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode +from torch.fx.experimental import symbolic_shapes +from torch.utils import _pytree as pytree +from torch.utils._pytree import treespec_dumps, treespec_loads +from torch.utils._sympy.value_ranges import 
ValueRanges + +from .schema import ( # type: ignore[attr-defined] + Argument, + BufferMutationSpec, + CustomObjArgument, + Device, + ExportedProgram, + GradientToParameterSpec, + GradientToUserInputSpec, + Graph, + GraphArgument, + GraphModule, + GraphSignature, + InputSpec, + InputToBufferSpec, + InputToCustomObjSpec, + InputToParameterSpec, + InputToTensorConstantSpec, + Layout, + LossOutputSpec, + MemoryFormat, + ModuleCallEntry, + ModuleCallSignature, + NamedArgument, + Node, + OptionalTensorArgument, + OutputSpec, + RangeConstraint, + ScalarType, + SCHEMA_VERSION, + SymBool, + SymBoolArgument, + SymExpr, + SymExprHint, + SymInt, + SymIntArgument, + TensorArgument, + TensorMeta, + TREESPEC_VERSION, + UserInputMutationSpec, + UserInputSpec, + UserOutputSpec, +) +from .union import _Union + + +__all__ = [ + "serialize", + "GraphModuleSerializer", + "ExportedProgramSerializer", + "GraphModuleDeserializer", + "ExportedProgramDeserializer", +] + +from .upgrade import GraphModuleOpUpgrader + +log = logging.getLogger(__name__) + + +class SerializeError(RuntimeError): + pass + + +def _reverse_map(d: Dict[Any, Enum]): + return {v.value: k for k, v in d.items()} + + +MetaType = Union[FakeTensor, int, torch.SymInt, bool, torch.SymBool, ep.CustomObjArgument] + + +ST_DELIMITER = ";" + +_TORCH_TO_SERIALIZE_DTYPE = { + torch.uint8: ScalarType.BYTE, + torch.int8: ScalarType.CHAR, + torch.int16: ScalarType.SHORT, + torch.int32: ScalarType.INT, + torch.int64: ScalarType.LONG, + torch.float16: ScalarType.HALF, + torch.float32: ScalarType.FLOAT, + torch.float64: ScalarType.DOUBLE, + torch.complex32: ScalarType.COMPLEXHALF, + torch.complex64: ScalarType.COMPLEXFLOAT, + torch.complex128: ScalarType.COMPLEXDOUBLE, + torch.bool: ScalarType.BOOL, + torch.bfloat16: ScalarType.BFLOAT16 +} + + +_SERIALIZE_TO_TORCH_DTYPE = _reverse_map(_TORCH_TO_SERIALIZE_DTYPE) # type: ignore[arg-type] + + +_TORCH_TO_SERIALIZE_LAYOUT = { + torch.sparse_coo: Layout.SparseCoo, + torch.sparse_csr: 
Layout.SparseCsr, + torch.sparse_csc: Layout.SparseCsc, + torch.sparse_bsr: Layout.SparseBsr, + torch.sparse_bsc: Layout.SparseBsc, + torch._mkldnn: Layout._mkldnn, # type: ignore[attr-defined] + torch.strided: Layout.Strided, +} + + +_SERIALIZE_TO_TORCH_LAYOUT = _reverse_map(_TORCH_TO_SERIALIZE_LAYOUT) # type: ignore[arg-type] + + +_TORCH_TO_SERIALIZE_MEMORY_FORMAT = { + torch.contiguous_format: MemoryFormat.ContiguousFormat, + torch.channels_last: MemoryFormat.ChannelsLast, + torch.channels_last_3d: MemoryFormat.ChannelsLast3d, + torch.preserve_format: MemoryFormat.PreserveFormat, +} + + +_SERIALIZE_TO_TORCH_MEMORY_FORMAT = _reverse_map(_TORCH_TO_SERIALIZE_MEMORY_FORMAT) # type: ignore[arg-type] + + +_SYM_INT_OPS = { + operator.mul, + operator.add, + operator.sub, + operator.floordiv, + operator.mod, + torch.sym_int, + torch.sym_ite, + torch.sym_max, + torch.sym_min, + torch.sym_sqrt, +} + + +_SYM_BOOL_OPS = { + operator.eq, + operator.ne, + operator.le, + operator.ge, + operator.lt, + operator.gt, + torch.sym_not, +} + + +@dataclass +class SerializedArtifact: + exported_program: Union[ExportedProgram, bytes] + state_dict: bytes + constants: bytes + + +def deserialize_device(d: Device) -> torch.device: + if d.index is None: + return torch.device(type=d.type) # type: ignore[call-overload] + return torch.device(type=d.type, index=d.index) + + +def serialize_sym_int(s: Union[int, torch.SymInt]) -> SymInt: + if isinstance(s, (torch.SymInt, int)): + if symbolic_shapes.is_concrete_int(s): + return SymInt.create(as_int=int(s)) + else: + assert isinstance(s, torch.SymInt) + if s.node.hint is None: + return SymInt.create(as_expr=SymExpr(str(s))) + else: + return SymInt.create(as_expr=SymExpr(str(s), hint=SymExprHint.create(as_int=s.node.hint))) + else: + raise SerializeError( + f"SymInt should be either symbol or int, got `{s}` of type `{type(s)}`" + ) + + +def serialize_sym_bool(s: Union[bool, torch.SymBool]) -> SymBool: + if isinstance(s, (torch.SymBool, bool)): + if 
symbolic_shapes.is_concrete_bool(s): + return SymBool.create(as_bool=bool(s)) + else: + return SymBool.create(as_expr=SymExpr(expr_str=str(s))) + else: + raise SerializeError( + f"SymBool should be either symbol or bool, got `{s}` of type `{type(s)}`" + ) + + +def serialize_tensor_meta(t: torch.Tensor) -> TensorMeta: + """ + Extract a TensorMeta describing `t`. + """ + return TensorMeta( + dtype=_TORCH_TO_SERIALIZE_DTYPE[t.dtype], + sizes=[serialize_sym_int(s) for s in t.shape], + requires_grad=t.requires_grad, + device=Device(type=t.device.type, index=t.device.index), + strides=[serialize_sym_int(s) for s in t.stride()], + storage_offset=serialize_sym_int(0), # TODO needs to be fixed. + layout=_TORCH_TO_SERIALIZE_LAYOUT[t.layout], + ) + + +_CURRENT_DESERIALIZER: Optional["GraphModuleDeserializer"] = None + + +def _reduce_fake_tensor(fake_tensor: FakeTensor): + is_parameter = isinstance(fake_tensor, torch.nn.Parameter) + tensor_meta = serialize_tensor_meta(fake_tensor) + tensor_meta_bytes = json.dumps(_dataclass_to_dict(tensor_meta), cls=EnumEncoder).encode("utf-8") + return _reconstruct_fake_tensor, (tensor_meta_bytes, is_parameter) + + +def _reconstruct_fake_tensor(serialized_tensor_meta: bytes, is_parameter: bool) -> FakeTensor: + # Deserialize the bytes into a TensorMeta + json_tensor_meta = json.loads(serialized_tensor_meta.decode("utf-8")) + tensor_meta = _dict_to_dataclass(TensorMeta, json_tensor_meta) + # Find the current fake mode + assert _CURRENT_DESERIALIZER is not None, "Need access to current deserializer state" + fake_tensor = _CURRENT_DESERIALIZER.deserialize_tensor_meta(tensor_meta) + if is_parameter: + fake_tensor = torch.nn.Parameter(fake_tensor) # type: ignore[assignment] + return fake_tensor + + +def serialize_torch_artifact(artifact: Dict[str, Any]) -> bytes: + assert FakeTensor not in copyreg.dispatch_table, "Refusing to stomp on existing FakeTensor reducer" + try: + copyreg.pickle(FakeTensor, _reduce_fake_tensor) + buffer = io.BytesIO() + # 
This is a workaround for backend's tensor deserialization problem: + # unpickleTensor() always create a tensor on the device where it was originally saved + # This behavior is bad for multi-gpu training, as we wish to directly load the tensor + # on the designated device. + # For now, we simply move the tensor to cpu before saving. + # TODO: this should be fixed by deserialization instead. + torch.save(artifact, buffer) + return buffer.getvalue() + finally: + del copyreg.dispatch_table[FakeTensor] + + +def deserialize_torch_artifact(serialized: bytes): + if len(serialized) == 0: + return {} + buffer = io.BytesIO(serialized) + buffer.seek(0) + artifact = torch.load(buffer) + assert isinstance(artifact, dict) + return artifact + + +def _sympy_int_to_int(val: sympy.Expr): + # Convert simple sympy Integers into concrete int + if val == sympy.oo: + return math.inf + if val == -sympy.oo: + return -math.inf + if isinstance(val, sympy.Integer): + return int(val) + raise RuntimeError( + "Export constraints cannot be non-integer expressions" + ) + + +def _int_to_sympy_int(val) -> sympy.Expr: + # Convert concrete int into simple sympy Integers + if val == math.inf: + return sympy.oo + if val == -math.inf: + return -sympy.oo + return sympy.Integer(val) + + +def serialize_range_constraints( + range_constraints: Dict[sympy.Symbol, ValueRanges] +) -> Dict[str, RangeConstraint]: + return { + str(k): RangeConstraint( + _sympy_int_to_int(v.lower), # type: ignore[arg-type] + _sympy_int_to_int(v.upper), # type: ignore[arg-type] + ) + for k, v in range_constraints.items() + } + + +def _is_single_tensor_return(target: torch._ops.OpOverload) -> bool: + returns = target._schema.returns + return len(returns) == 1 and isinstance(returns[0].real_type, torch.TensorType) + + +def _is_single_tensor_list_return(target: torch._ops.OpOverload) -> bool: + returns = target._schema.returns + if len(returns) != 1: + return False + return_type = returns[0].real_type + return isinstance(return_type, 
@dataclass
class GraphState:
    """Mutable accumulator for one graph being serialized (inputs, outputs,
    nodes, and per-name value metadata). Swapped out for nested subgraphs."""
    inputs: List[Argument] = field(default_factory=list)
    outputs: List[Argument] = field(default_factory=list)
    nodes: List[Node] = field(default_factory=list)
    tensor_values: Dict[str, TensorMeta] = field(default_factory=dict)
    sym_int_values: Dict[str, SymInt] = field(default_factory=dict)
    sym_bool_values: Dict[str, SymBool] = field(default_factory=dict)
    is_single_tensor_return: bool = False
    custom_obj_values: Dict[str, CustomObjArgument] = field(default_factory=dict)


class GraphModuleSerializer:
    """Serializes an FX graph module (plus signature/call graph) into schema objects."""

    def __init__(
        self,
        graph_signature: ep.ExportGraphSignature,
        module_call_graph: List[ep.ModuleCallEntry]
    ):
        self.graph_state = GraphState()
        self.graph_signature = graph_signature
        self.module_call_graph = module_call_graph
        # Custom script objects encountered during serialization,
        # keyed by a generated "_custom_obj_N" name.
        self.custom_objs: Dict[str, torch._C.ScriptObject] = {}

    @contextmanager
    def save_graph_state(self):
        """Temporarily swap in a fresh GraphState (used for nested subgraphs)."""
        saved = self.graph_state
        self.graph_state = GraphState()
        try:
            yield
        finally:
            self.graph_state = saved

    def handle_placeholder(self, node: torch.fx.Node):
        """Serialize a placeholder node into a graph input Argument."""
        assert node.op == "placeholder"
        if isinstance(node.meta['val'], torch.Tensor):
            graph_input = Argument.create(as_tensor=TensorArgument(name=node.name))
            self.graph_state.tensor_values[node.name] = serialize_tensor_meta(node.meta["val"])
        elif isinstance(node.meta['val'], torch.SymInt):
            raise AssertionError("SymInt graph input is not implemented yet.")
        elif isinstance(node.meta['val'], (int, bool, str, float, type(None))):
            graph_input = self.serialize_input(node.meta['val'])
        elif isinstance(node.meta['val'], ep.CustomObjArgument):
            class_fqn = node.meta["val"].class_fqn
            graph_input = Argument.create(as_custom_obj=CustomObjArgument(name=node.name, class_fqn=class_fqn))
            self.graph_state.custom_obj_values[node.name] = self.serialize_script_obj_meta(node.meta["val"])
        else:
            raise AssertionError(f"Unimplemented graph input type: {node.meta['val']}")
        self.graph_state.inputs.append(graph_input)

    def handle_output(self, node: torch.fx.Node):
        """Serialize the output node; records whether the graph has a single tensor return."""
        assert node.op == "output"
        assert len(node.args) == 1, "FX.Node's args should have one arg"
        node_args = node.args[0]
        if isinstance(node_args, torch.fx.Node):
            # For singleton tensor returns
            self.graph_state.is_single_tensor_return = True
            self.graph_state.outputs = [self.serialize_input(node_args)]
        else:
            assert isinstance(node_args, (tuple, list))
            self.graph_state.outputs = [self.serialize_input(arg) for arg in node_args]

    def serialize_operator(self, target) -> str:
        """Return a fully-qualified string name for an op/callable target."""
        if isinstance(target, str):
            return target
        elif target.__module__.startswith("torch._ops"):
            # TODO(zhxchen17) Maybe provide a function name helper in FX.
            # From torch.fx.node._get_qualified_name
            module = target.__module__.replace("torch._ops", "torch.ops")
            return f"{module}.{target.__name__}"
        else:  # TODO(zhxchen17) Don't catch all here.
            return f"{target.__module__}.{target.__name__}"

    def handle_call_function(self, node: torch.fx.Node):
        """Serialize a call_function node, dispatching on the kind of target."""
        assert node.op == "call_function"

        # getitem has been handled in the producer node, skip it here
        if node.target is operator.getitem:
            return

        if node.target in _SYM_INT_OPS:
            assert len(node.kwargs) == 0
            meta_val = node.meta["val"]
            ex_node = Node(
                target=self.serialize_operator(node.target),
                inputs=self.serialize_sym_op_inputs(node.target, node.args),
                outputs=[Argument.create(as_sym_int=self.serialize_sym_int_output(node.name, meta_val))],
                metadata=self.serialize_metadata(node),
            )
        elif node.target in _SYM_BOOL_OPS:
            assert len(node.kwargs) == 0
            meta_val = node.meta["val"]
            ex_node = Node(
                target=self.serialize_operator(node.target),
                inputs=self.serialize_sym_op_inputs(node.target, node.args),
                outputs=[Argument.create(as_sym_bool=self.serialize_sym_bool_output(node.name, meta_val))],
                metadata=self.serialize_metadata(node),
            )
        elif isinstance(node.target, torch._ops.OpOverload):
            ex_node = Node(
                target=self.serialize_operator(node.target),
                inputs=self.serialize_inputs(node.target, node.args, node.kwargs),
                outputs=self.serialize_outputs(node),
                # TODO: create a new tensor_values here, meta might have faketensor info
                metadata=self.serialize_metadata(node),
            )
        elif isinstance(node.target, torch._ops.HigherOrderOperator):
            ex_node = Node(
                target=self.serialize_operator(node.target),
                inputs=self.serialize_hoo_inputs(node.args, node.kwargs),
                outputs=self.serialize_hoo_outputs(node),
                metadata=self.serialize_metadata(node),
            )
        else:
            raise SerializeError(f"Serializing {node.target} is not supported")

        self.graph_state.nodes.append(ex_node)

    def handle_get_attr(self, node):
        # get_attr nodes are handled when they appear as arguments
        # (see serialize_input); nothing to do here.
        pass

    def serialize_metadata(self, node: torch.fx.Node) -> Dict[str, str]:
        """Serialize selected node.meta entries (stack traces, module/source stacks)
        into flat strings."""
        ret = {}
        if stack_trace := node.meta.get("stack_trace"):
            ret["stack_trace"] = stack_trace

        if nn_module_stack := node.meta.get("nn_module_stack"):
            def export_nn_module_stack(val):
                assert isinstance(val, tuple) and len(val) == 2
                path, ty = val

                assert isinstance(path, str)

                # node.meta["nn_module_stack"] could have two forms:
                # 1. (path: str, module_type: 'type'), e.g.
                # ('', )
                # 2. (path: str, module_type: str), e.g.
                # ('', 'sigmoid.inference.MySimpleModel')
                # ExportedProgram directly produced by torch.export() has form 1
                # ExportedProgram deserialized from disk has form 2
                # TODO: This is not ideal, we should fix this.
                if isinstance(ty, str):
                    normalized_ty = ty
                else:
                    normalized_ty = ty.__module__ + "." + ty.__qualname__

                return path + "," + normalized_ty

            # Serialize to "key,orig_path,type_str"
            nn_module_list = [
                f"{k},{export_nn_module_stack(v)}"
                for k, v in nn_module_stack.items()
            ]
            ret["nn_module_stack"] = ST_DELIMITER.join(nn_module_list)

        if source_fn_st := node.meta.get("source_fn_stack"):
            source_fn_list = [f"{source_fn[0]},{self.serialize_operator(source_fn[1])}" for source_fn in source_fn_st]
            ret["source_fn_stack"] = ST_DELIMITER.join(source_fn_list)

        return ret

    def serialize_script_obj_meta(self, script_obj_meta: ep.CustomObjArgument) -> CustomObjArgument:
        """Convert an export CustomObjArgument into its schema counterpart."""
        return CustomObjArgument(
            name=script_obj_meta.name,
            class_fqn=script_obj_meta.class_fqn,
        )

    def serialize_sym_op_inputs(self, op, args) -> List[NamedArgument]:
        """Serialize positional args of a sym op, naming them from the op's signature."""
        serialized_args = []
        args_names = inspect.signature(op).parameters.keys()
        for args_name, arg in zip(args_names, args):
            serialized_args.append(
                NamedArgument(name=args_name, arg=self.serialize_input(arg))
            )
        return serialized_args

    def serialize_inputs(
        self, target: torch._ops.OpOverload, args, kwargs=None
    ) -> List[NamedArgument]:
        """Serialize args/kwargs against the op schema; defaults that were not
        explicitly passed are omitted."""
        assert isinstance(target, torch._ops.OpOverload)
        kwargs = kwargs or {}
        serialized_args = []
        for i, schema_arg in enumerate(target._schema.arguments):
            if schema_arg.name in kwargs:
                serialized_args.append(
                    NamedArgument(
                        name=schema_arg.name,
                        arg=self.serialize_input(kwargs[schema_arg.name]),
                    )
                )
            elif not schema_arg.kwarg_only and i < len(args):
                serialized_args.append(
                    NamedArgument(
                        name=schema_arg.name,
                        arg=self.serialize_input(args[i]),
                    )
                )
            else:
                # We intentionally don't serialize the missing arguments
                # with default values
                pass

        return serialized_args

    def serialize_hoo_inputs(self, args, kwargs) -> List[NamedArgument]:
        """
        For serializing HOO inputs since HOOs do not have a schema.
        """
        # Positional args get an empty name; kwargs keep theirs.
        inputs = [
            NamedArgument(
                name="",
                arg=self.serialize_input(a),
            ) for a in args
        ]
        inputs.extend([
            NamedArgument(
                name=name,
                arg=self.serialize_input(a)
            ) for name, a in kwargs.items()
        ])
        return inputs
+ """ + inputs = [ + NamedArgument( + name="", + arg=self.serialize_input(a), + ) for a in args + ] + inputs.extend([ + NamedArgument( + name=name, + arg=self.serialize_input(a) + ) for name, a in kwargs.items() + ]) + return inputs + + def is_sym_int_arg(self, arg) -> bool: + return isinstance(arg, int) or ( + isinstance(arg, torch.fx.Node) and arg.name in self.graph_state.sym_int_values + ) + + def is_sym_bool_arg(self, arg) -> bool: + return isinstance(arg, bool) or ( + isinstance(arg, torch.fx.Node) and arg.name in self.graph_state.sym_bool_values + ) + + def serialize_input(self, arg) -> Argument: + import torch._inductor.ir as inductor_ir + inductor_tensor_buffers = ( + inductor_ir.Buffer, + inductor_ir.ReinterpretView, + ) + + if isinstance(arg, torch.fx.Node): + if arg.op == "get_attr": + assert isinstance(arg.target, str) + attr = getattr(arg.graph.owning_module, arg.target) + + if isinstance(attr, torch.Tensor): + raise SerializeError("getattr nodes containing tensors should not appear in the graph") + elif isinstance(attr, torch.fx.GraphModule): + with self.save_graph_state(): + graph = self.serialize_graph(attr) + return Argument.create(as_graph=GraphArgument(name=arg.target, graph=graph)) + else: + raise SerializeError(f"Unsupported getattr attribute {arg.target} with type: {type(attr)}") + elif self.is_sym_int_arg(arg): + return Argument.create(as_sym_int=SymIntArgument.create(as_name=arg.name)) + elif self.is_sym_bool_arg(arg): + return Argument.create(as_sym_bool=SymBoolArgument.create(as_name=arg.name)) + else: + if isinstance(arg.meta["val"], ep.CustomObjArgument): + return Argument.create(as_custom_obj=CustomObjArgument(name=arg.name, class_fqn=arg.meta["val"].class_fqn)) + return Argument.create(as_tensor=TensorArgument(name=arg.name)) + elif isinstance(arg, inductor_tensor_buffers): + # Other branches are for arguments in fx node. 
+ # This is a special branch for handling buffers (representing tensor arguments) + # for inductor's ExternalFallbackNode + # export_extern_kernel_node() is using this function to serialize arguments + arg_name = arg.get_name() + assert arg_name is not None, "Buffer must have valid name" + return Argument.create(as_tensor=TensorArgument(name=arg_name)) + elif isinstance(arg, torch.SymInt): + # This is a special branch for handling SymInt args in inductor's + # ExternalFallbackNode. + # For regular FX graph, SymInt arg should be a fx.Node with + # self.is_sym_int_arg(arg) being true + return Argument.create(as_sym_int=SymIntArgument.create(as_name=str(arg))) + elif isinstance(arg, bool): + return Argument.create(as_bool=arg) + elif isinstance(arg, str): + return Argument.create(as_string=arg) + elif isinstance(arg, int): + return Argument.create(as_int=arg) + elif isinstance(arg, float): + return Argument.create(as_float=arg) + elif arg is None: + return Argument.create(as_none=()) + elif isinstance(arg, (list, tuple)): + # Must check bool first, as bool is also treated as int + if all(isinstance(a, bool) for a in arg): + return Argument.create(as_bools=list(arg)) + elif all(isinstance(a, int) for a in arg): + return Argument.create(as_ints=list(arg)) + elif all(isinstance(a, float) for a in arg): + return Argument.create(as_floats=list(arg)) + elif all(isinstance(a, str) for a in arg): + return Argument.create(as_strings=list(arg)) + elif all(isinstance(a, torch.SymInt) for a in arg): + # This is a special branch for handling SymInt args in inductor's + # ExternalFallbackNode. 
+ # For regular FX graph, SymInt arg should be a fx.Node with + # self.is_sym_int_arg(arg) being true + return Argument.create( + as_sym_ints=[SymIntArgument.create(as_name=str(a)) for a in arg] + ) + elif all(self.is_sym_int_arg(a) for a in arg): + # list of sym_ints + values = [] + for a in arg: + if isinstance(a, torch.fx.Node): + values.append(SymIntArgument.create(as_name=a.name)) + elif isinstance(a, int): + values.append(SymIntArgument.create(as_int=a)) + return Argument.create(as_sym_ints=values) + elif all(self.is_sym_bool_arg(a) for a in arg): + # list of sym_bools + values = [] + for a in arg: + if isinstance(a, torch.fx.Node): + values.append(SymBoolArgument.create(as_name=a.name)) + elif isinstance(a, bool): + values.append(SymBoolArgument.create(as_bool=a)) + return Argument.create(as_sym_bools=values) + elif all(isinstance(a, torch.fx.Node) for a in arg): + # list of tensors + arguments = [] + for a in arg: + if a.op == "get_attr": + raise SerializeError("getattr nodes containing tensors should not appear in the graph") + arguments.append(TensorArgument(name=a.name)) + return Argument.create(as_tensors=arguments) + elif all(isinstance(a, (torch.fx.Node, type(None))) for a in arg): + # list of optional tensors + def serialize_optional_tensor_args(a): + if a is None: + return OptionalTensorArgument.create(as_none=()) + elif isinstance(a, torch.fx.Node): + return OptionalTensorArgument.create(as_tensor=a.name) + else: + raise SerializeError(f"Unsupported list/tuple argument: {a}") + return Argument.create( + as_optional_tensors=list(map(serialize_optional_tensor_args, arg)) + ) + elif all(isinstance(a, inductor_tensor_buffers) for a in arg): + # list of inductor buffers + return Argument.create( + as_tensors=[TensorArgument(name=a.get_name()) for a in arg], + ) + elif all(isinstance(a, (*inductor_tensor_buffers, type(None))) for a in arg): + # list of inductor buffers as optional tensors + def serialize_optional_tensor_args(a): + if a is None: + return 
OptionalTensorArgument.create(as_none=()) + elif isinstance(a, inductor_tensor_buffers): + return OptionalTensorArgument.create(as_tensor=a.get_name()) + else: + raise SerializeError(f"Unsupported list/tuple argument: {a}") + return Argument.create( + as_optional_tensors=list(map(serialize_optional_tensor_args, arg)) + ) + else: + raise SerializeError(f"Unsupported list/tuple argument type: {[type(a) for a in arg]}") + elif isinstance(arg, torch.dtype): + return Argument.create(as_scalar_type=_TORCH_TO_SERIALIZE_DTYPE[arg]) + elif isinstance(arg, torch.device): + return Argument.create(as_device=Device(type=arg.type, index=arg.index)) + elif isinstance(arg, torch.memory_format): + return Argument.create(as_memory_format=_TORCH_TO_SERIALIZE_MEMORY_FORMAT[arg]) + elif isinstance(arg, torch.layout): + return Argument.create(as_layout=_TORCH_TO_SERIALIZE_LAYOUT[arg]) + elif isinstance(arg, torch._C.ScriptObject): + if not ( + arg._has_method("__getstate__") and # type: ignore[attr-defined] + arg._has_method("__setstate__") # type: ignore[attr-defined] + ): + raise SerializeError( + f"Unable to serialize custom class {arg}. Please define " + "serialization methods via def_pickle()." + ) + # Custom objects through torchind are serializable with pickle, + # through implementing the .def_pickle function. This should result + # in the object containing a __getstate__ and __setstate__ + # serialize/deserialize function. 
+ custom_obj_name = f"_custom_obj_{len(self.custom_objs)}" + self.custom_objs[custom_obj_name] = arg + class_fqn = arg._type().qualified_name() # type: ignore[attr-defined] + return Argument.create(as_custom_obj=CustomObjArgument(custom_obj_name, class_fqn)) + elif isinstance(arg, torch._ops.OpOverload): + return Argument.create(as_operator=self.serialize_operator(arg)) + else: + raise SerializeError(f"Unsupported argument type: {type(arg)}") + + def serialize_tensor_output(self, name, meta_val) -> TensorArgument: + assert name not in self.graph_state.tensor_values + self.graph_state.tensor_values[name] = serialize_tensor_meta(meta_val) + return TensorArgument(name=name) + + def serialize_sym_int_output(self, name, meta_val) -> SymIntArgument: + assert name not in self.graph_state.sym_int_values + self.graph_state.sym_int_values[name] = serialize_sym_int(meta_val) + return SymIntArgument.create(as_name=name) + + def serialize_sym_bool_output(self, name, meta_val) -> SymIntArgument: + assert name not in self.graph_state.sym_bool_values + self.graph_state.sym_bool_values[name] = serialize_sym_bool(meta_val) + return SymBoolArgument.create(as_name=name) + + def serialize_input_spec(self, spec: ep.InputSpec) -> InputSpec: + if spec.kind == ep.InputKind.USER_INPUT: + return InputSpec.create( + user_input=UserInputSpec( + arg=self.serialize_argument_spec(spec.arg) + ) + ) + elif spec.kind == ep.InputKind.PARAMETER: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return InputSpec.create( + parameter=InputToParameterSpec( + arg=TensorArgument(name=spec.arg.name), + parameter_name=spec.target, + ) + ) + elif spec.kind == ep.InputKind.BUFFER: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + assert spec.persistent is not None + return InputSpec.create( + buffer=InputToBufferSpec( + arg=TensorArgument(name=spec.arg.name), + buffer_name=spec.target, + persistent=spec.persistent, + ) + ) + elif spec.kind == 
ep.InputKind.CONSTANT_TENSOR: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return InputSpec.create( + tensor_constant=InputToTensorConstantSpec( + arg=TensorArgument(name=spec.arg.name), + tensor_constant_name=spec.target, + ) + ) + elif spec.kind == ep.InputKind.CUSTOM_OBJ: + assert spec.target is not None + assert isinstance(spec.arg, ep.CustomObjArgument) + return InputSpec.create( + custom_obj=InputToCustomObjSpec( + arg=CustomObjArgument(name=spec.arg.name, class_fqn=spec.arg.class_fqn), + custom_obj_name=spec.target, + ) + ) + else: + raise AssertionError(f"Unknown argument kind: {spec}") + + def serialize_output_spec(self, spec: ep.OutputSpec) -> OutputSpec: + if spec.kind == ep.OutputKind.USER_OUTPUT: + return OutputSpec.create( + user_output=UserOutputSpec( + arg=self.serialize_argument_spec(spec.arg) + ) + ) + elif spec.kind == ep.OutputKind.LOSS_OUTPUT: + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + loss_output=LossOutputSpec( + arg=TensorArgument(name=spec.arg.name) + ) + ) + elif spec.kind == ep.OutputKind.BUFFER_MUTATION: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + buffer_mutation=BufferMutationSpec( + arg=TensorArgument(name=spec.arg.name), + buffer_name=spec.target, + ) + ) + elif spec.kind == ep.OutputKind.GRADIENT_TO_PARAMETER: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + gradient_to_parameter=GradientToParameterSpec( + arg=TensorArgument(name=spec.arg.name), + parameter_name=spec.target, + ) + ) + elif spec.kind == ep.OutputKind.GRADIENT_TO_USER_INPUT: + assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + gradient_to_user_input=GradientToUserInputSpec( + arg=TensorArgument(name=spec.arg.name), + user_input_name=spec.target, + ) + ) + elif spec.kind == ep.OutputKind.USER_INPUT_MUTATION: + 
assert spec.target is not None + assert isinstance(spec.arg, ep.TensorArgument) + return OutputSpec.create( + user_input_mutation=UserInputMutationSpec( + arg=TensorArgument(name=spec.arg.name), + user_input_name=spec.target, + ) + ) + else: + raise AssertionError(f"Unknown argument kind: {spec}") + + def serialize_signature(self, sig: ep.ExportGraphSignature) -> GraphSignature: + return GraphSignature( + input_specs=[self.serialize_input_spec(s) for s in sig.input_specs], + output_specs=[self.serialize_output_spec(s) for s in sig.output_specs], + ) + + def serialize_argument_spec(self, x: ep.ArgumentSpec) -> Argument: + if isinstance(x, ep.TensorArgument): + return Argument.create(as_tensor=TensorArgument(name=x.name)) + elif isinstance(x, ep.SymIntArgument): + return Argument.create(as_sym_int=SymIntArgument.create(as_name=x.name)) + elif isinstance(x, ep.ConstantArgument): + return self.serialize_input(x.value) + elif isinstance(x, ep.CustomObjArgument): + return Argument.create(as_custom_obj=CustomObjArgument(name=x.name, class_fqn=x.class_fqn)) + else: + raise AssertionError("TODO") + + def serialize_module_call_signature(self, module_call_signature: ep.ModuleCallSignature) -> ModuleCallSignature: + return ModuleCallSignature( + inputs=[self.serialize_argument_spec(x) for x in module_call_signature.inputs], + outputs=[self.serialize_argument_spec(x) for x in module_call_signature.outputs], + in_spec=treespec_dumps(module_call_signature.in_spec, TREESPEC_VERSION), + out_spec=treespec_dumps(module_call_signature.out_spec, TREESPEC_VERSION), + ) + + def serialize_module_call_graph(self, module_call_graph: List[ep.ModuleCallEntry]) -> List[ModuleCallEntry]: + return [ + ModuleCallEntry( + fqn=entry.fqn, + signature=self.serialize_module_call_signature(entry.signature) if entry.signature else None, + ) for entry in module_call_graph + ] + + def serialize_outputs(self, node: torch.fx.Node) -> List[Argument]: + """For a given node, return the dataclass representing 
its output values. + + [NOTE: Multiple outputs] We handle aggregates differently than FX. For + FX, it looks like: + + x = call_function("multiple_return", ...) + element0 = call_function(getitem, x, 0) + foo = call_function("use_output", element0) + + We do not want the intermediate `getitem` call, so our serialized thing looks like: + + element0, element1, element2 = call_function("multiple_return", ...) + foo = call_function("use_output", element0) + + We want names to be consistent across these two schemes, so that we can + mostly reuse the names coming from FX. This function computes a mapping from + the FX representation to our representation, preserving the names. + """ + assert node.op == "call_function" and isinstance(node.target, torch._ops.OpOverload) + + assert isinstance(node.target, torch._ops.OpOverload) + returns = node.target._schema.returns + + if len(returns) == 0: + return [] + + meta_val = node.meta["val"] + + def output_node_at_index(node, index): + for user in node.users: + assert user.target is operator.getitem, f"{user} is not a getitem node" + if index == user.args[1]: + return user + return None + + # Check single value return + if _is_single_tensor_list_return(node.target): + # e.g "-> Tensor[]" + tensor_args = [] + for idx, meta in enumerate(meta_val): + user_node = output_node_at_index(node, idx) + name = ( + user_node.name + if user_node is not None + else f"{node.name}_unused_{idx}" + ) + tensor_args.append(self.serialize_tensor_output(name, meta)) + return [Argument.create(as_tensors=tensor_args)] + elif len(returns) == 1: + return [self.serialize_output(node.name, meta_val)] + + # There are a two possibilities at this point: + # - This operator returns a tuple of Tensors, e.g. "-> (Tensor, Tensor)" + # - This operator returns a tuple of mixed of Tensor and Tensors, e.g. "-> (Tensor, Tensor[])" + # + # Either way, start by gathering a list of TensorArguments with the correct names. 
+ # For consistent naming with FX, consult the downstream `getitem` node and + # make sure our outputs have the same name. + + output_arguments = [] + for idx, (meta, return_schema) in enumerate(zip(meta_val, returns)): + if meta is None: + assert isinstance(return_schema.real_type, (torch.OptionalType, torch.TensorType)) + # When the return type is annoated as Tensor type, the op can also return an + # undefined Tensor which will be implicitly converted to None in Python. + output_arguments.append(Argument.create(as_none=())) + elif isinstance(meta, FakeTensor): + assert isinstance(return_schema.real_type, torch.TensorType) + user_node = output_node_at_index(node, idx) + name = ( + user_node.name + if user_node is not None + else f"{node.name}_unused_{idx}" + ) + output_arguments.append(self.serialize_output(name, meta)) + elif isinstance(meta, list): + # for List[Tensor] return type + assert isinstance( + return_schema.real_type, torch.ListType + ) and isinstance( + return_schema.real_type.getElementType(), torch.TensorType + ) + user_node = output_node_at_index(node, idx) + assert user_node is not None + + args = [] + for i, m in enumerate(meta): + if m is None: + continue + sub_user_node = output_node_at_index(user_node, i) + assert sub_user_node is not None, f"No user found at index {i}" + + args.append(self.serialize_tensor_output(sub_user_node.name, m)) + output_arguments.append(Argument.create(as_tensors=args)) + elif isinstance(meta, (int, SymInt)): + user_node = output_node_at_index(node, idx) + name = ( + user_node.name + if user_node is not None + else f"{node.name}_unused_{idx}" + ) + output_arguments.append(self.serialize_output(name, meta)) + else: + raise ValueError(f"Unhandled output type {type(meta)} from node {node.format_node()}") + + return output_arguments + + def serialize_hoo_outputs(self, node: torch.fx.Node) -> List[Argument]: + """ + For serializing HOO outputs since HOOs do not have a schema. 
+ """ + meta_val = node.meta["val"] + + if isinstance(meta_val, tuple): + # Note: Since we don't have a schema, we just serialize all tuple + # outputs to be a list of values. Even if the output is supposed to + # be a tensor list (Tensor[]), we will serialize it to be a list of + # tensors (Tensor, Tensor, Tensor). An exception is that if there's + # a singleton tensor, we will serialize this to be a singleton + # tensor list so that the deserializer knows to insert getitem nodes. + + idx_to_name = {} + for user in node.users: + if user.target is not operator.getitem: + continue + idx_to_name[user.args[1]] = user.name + + for idx in range(len(meta_val)): + # FX does not emit a getitem node for any outputs that are unused. + # However, we need a name for them so that the number of outputs will + # correctly match the schema. Just assign a dummy name. + if idx not in idx_to_name: + idx_to_name[idx] = f"{node.name}_unused_{idx}" + + if len(meta_val) == 1: + tensors = [] + for i, v in enumerate(meta_val): + assert isinstance(v, torch.Tensor) + tensors.append(self.serialize_tensor_output(idx_to_name[i], v)) + return [Argument.create(as_tensors=tensors)] + + else: + return [ + self.serialize_output(idx_to_name[i], element_meta_val) + for i, element_meta_val in enumerate(meta_val) + ] + + else: + return [self.serialize_output(node.name, meta_val)] + + def serialize_output(self, name: str, meta_val: Any) -> Argument: + # Check single value return + if meta_val is None: + return Argument.create(as_none=()) + if isinstance(meta_val, torch.Tensor): + # e.g "-> Tensor" + return Argument.create(as_tensor=self.serialize_tensor_output(name, meta_val)) + elif isinstance(meta_val, (int, torch.SymInt)): + # e.g "-> SymInt" + return Argument.create(as_sym_int=self.serialize_sym_int_output(name, meta_val)) + elif isinstance(meta_val, torch.SymBool): + # e.g "-> SymBool" + return Argument.create(as_sym_bool=self.serialize_sym_bool_output(name, meta_val)) + + # list outputs should've 
been handled earlier + raise SerializeError(f"Unable to serialize output {meta_val}") + + def _handle_getitem_users(self, node: torch.fx.Node) -> List[TensorArgument]: + meta_val = node.meta["val"] + + idx_to_name = {} + for user in node.users: + assert user.target is operator.getitem, f"User node {user} of {node} is incorrect" + idx_to_name[user.args[1]] = user.name + + for idx, _ in enumerate(meta_val): + # FX does not emit a getitem node for any outputs that are unused. + # However, we need a name for them so that the number of outputs will + # correctly match the schema. Just assign a dummy name. + if idx not in idx_to_name: + idx_to_name[idx] = f"{node.name}_unused_{idx}" + + arg_list = [] + for i, element_meta_val in enumerate(meta_val): + arg_list.append( + self.serialize_tensor_output(idx_to_name[i], element_meta_val) + ) + + return arg_list + + def serialize_graph(self, graph_module: torch.fx.GraphModule) -> Graph: + assert isinstance(graph_module, torch.fx.GraphModule) + for node in graph_module.graph.nodes: + try: + getattr(self, f"handle_{node.op}")(node) + except Exception as e: + raise SerializeError(f"Failed serializing node {node} in graph: {node.format_node()}") from e + + return Graph( + inputs=self.graph_state.inputs, + nodes=self.graph_state.nodes, + tensor_values=self.graph_state.tensor_values, + sym_int_values=self.graph_state.sym_int_values, + sym_bool_values=self.graph_state.sym_bool_values, + custom_obj_values=self.graph_state.custom_obj_values, + outputs=self.graph_state.outputs, + is_single_tensor_return=self.graph_state.is_single_tensor_return, + ) + + def serialize(self, graph_module: torch.fx.GraphModule) -> GraphModule: + graph = self.serialize_graph(graph_module) + + return GraphModule( + graph=graph, + signature=self.serialize_signature(self.graph_signature), + module_call_graph=self.serialize_module_call_graph(self.module_call_graph), + ) + + +class ExportedProgramSerializer: + def __init__(self, opset_version: Optional[Dict[str, 
int]] = None): + self.opset_version: Dict[str, int] = {} + if opset_version: + self.opset_version.update(opset_version) + if "aten" not in self.opset_version: + self.opset_version["aten"] = torch._C._get_max_operator_version() + + def serialize(self, exported_program: ep.ExportedProgram) -> SerializedArtifact: + """ + Args: + exported_program: Exported Program to serialize + """ + if type(self) == ExportedProgramSerializer: + exported_program._validate() + + gm_serializer = GraphModuleSerializer( + exported_program.graph_signature, + exported_program.module_call_graph + ) + serialized_graph_module = gm_serializer.serialize(exported_program.graph_module) + serialized_range_constraints = serialize_range_constraints(exported_program.range_constraints) + + # TODO: Directly serialize exported_program.constants once + # CustomClassHolders get stored in the ExportedProgram rather than in + # the graph + constants = {} + for n, c in gm_serializer.custom_objs.items(): + constants[n] = c + for n, t in exported_program.constants.items(): + assert n not in constants + constants[n] = t + + serialized_ep = ExportedProgram( + graph_module=serialized_graph_module, + opset_version=self.opset_version, + range_constraints=serialized_range_constraints, + schema_version=SchemaVersion( + major=SCHEMA_VERSION[0], + minor=SCHEMA_VERSION[1], + ), + dialect=exported_program.dialect, + ) + + # Test canonical form is well defined. 
+ canonicalize(serialized_ep) + + return SerializedArtifact( + serialized_ep, + serialize_torch_artifact(exported_program.state_dict), + serialize_torch_artifact(constants), + ) + + +class GraphModuleDeserializer: + @dataclasses.dataclass + class Result: + graph_module: torch.fx.GraphModule + signature: ep.ExportGraphSignature + module_call_graph: List[ep.ModuleCallEntry] + names_to_symbols: Dict[str, sympy.Symbol] + state_dict: Dict[str, Union[torch.Tensor, torch.nn.Parameter]] + constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]] + + def __init__(self): + self.serialized_name_to_node: Dict[str, torch.fx.Node] = {} + self.serialized_name_to_meta: Dict[str, MetaType] = {} + self.graph = torch.fx.Graph() + self.module = torch.nn.Module() + + @contextmanager + def save_graph_module(self) -> Iterator[None]: + saved = self.graph, self.module, self.serialized_name_to_node, self.serialized_name_to_meta + self.graph = torch.fx.Graph() + self.module = torch.nn.Module() + self.serialized_name_to_node = {} + self.serialized_name_to_meta = {} + try: + yield + finally: + self.graph, self.module, self.serialized_name_to_node, self.serialized_name_to_meta = saved + + def deserialize_operator(self, serialized_target: str): + if serialized_target.startswith("_operator"): # TODO(zhxchen17) Follow up on this. + module = operator + serialized_target_names = serialized_target.split(".")[1:] + elif serialized_target.startswith("torch"): + module = torch # type: ignore[misc] + serialized_target_names = serialized_target.split(".")[1:] + else: # TODO(zhxchen17) Don't catch all here. 
+ return serialized_target + + target = module + for name in serialized_target_names: + if not hasattr(target, name): + return serialized_target + else: + target = getattr(target, name) + return target + + def deserialize_sym_int(self, s: SymInt) -> Union[int, torch.SymInt]: + val = s.value + if s.type == "as_expr": + if val.expr_str in self.symbol_name_to_symbol: + sym = self.symbol_name_to_symbol[val.expr_str] + else: + sym = sympy.sympify(val.expr_str, locals=self.symbol_name_to_symbol) + # NOTE(avik): Assumptions on symbols are not explicitly serialized. + # This seems dangerous: it might cause unknown differences in shape env behavior + # on deserialization? Probably deserves a follow-up. + + # Here we force symbols corresponding to SymInts to be at least integers. + # Otherwise some expressions that the shape env would otherwise evaluate to False, + # e.g., 2*s = 9, can have rational solutions, e.g., 9/2. + sym = sym.subs({s: sympy.Symbol(s.name, integer=True) for s in sym.free_symbols}) + if isinstance(sym, sympy.Symbol): + self.symbol_name_to_symbol[val.expr_str] = sym + + if vr := self.symbol_name_to_range.get(val.expr_str): + symbolic_shapes._constrain_symbol_range( + self.shape_env, + sym, + compiler_min=vr.lower, # type: ignore[arg-type] + compiler_max=vr.upper, # type: ignore[arg-type] + ) + else: + # Placeholders, in particular, can have shapes as symbolic expressions. + # We need to populate the shape env with the range constraints of their + # free symbols, otherwise evaluating such expressions will error. 
+ self.symbol_name_to_symbol[val.expr_str] = sym + free_symbols = sym.free_symbols + for s in free_symbols: + if s.name not in self.symbol_name_to_symbol: + self.symbol_name_to_symbol[s.name] = s + if vr := self.symbol_name_to_range.get(s.name): + symbolic_shapes._constrain_symbol_range( + self.shape_env, + s, + compiler_min=vr.lower, # type: ignore[arg-type] + compiler_max=vr.upper, # type: ignore[arg-type] + ) + + + if val.hint is None: + hint = None + else: + assert val.hint.type == "as_int" + hint = val.hint.value + + return self.shape_env.create_symintnode(sym, hint=hint) + elif s.type == "as_int": + assert isinstance(val, int) + return val + else: + raise SerializeError( + f"SymInt has invalid field type {s.type} with value {s.value}" + ) + + def deserialize_sym_bool(self, s: SymBool) -> Union[bool, torch.SymBool]: + val = s.value + if s.type == "as_expr": + expr = sympy.sympify(val.expr_str, locals=self.symbol_name_to_symbol) + return self.shape_env.create_symboolnode(expr) + elif s.type == "as_bool": + assert isinstance(val, bool) + return val + else: + raise SerializeError( + f"SymBool has invalid field type {s.type} with value {s.value}" + ) + + def deserialize_tensor_meta( + self, + tensor_meta: TensorMeta, + ) -> FakeTensor: + with self.fake_tensor_mode: + return cast( + FakeTensor, + torch.empty_strided( + tuple(self.deserialize_sym_int(val) for val in tensor_meta.sizes), # type: ignore[misc] + tuple(self.deserialize_sym_int(val) for val in tensor_meta.strides), # type: ignore[misc] + device=deserialize_device(tensor_meta.device), + dtype=_SERIALIZE_TO_TORCH_DTYPE[tensor_meta.dtype], + ), + ) + + def deserialize_script_obj_meta(self, script_obj_meta: CustomObjArgument) -> ep.CustomObjArgument: + return ep.CustomObjArgument( + name=script_obj_meta.name, + class_fqn=script_obj_meta.class_fqn, + ) + + def deserialize_graph_output(self, output) -> torch.fx.Node: + if output.type == "as_tensor": + return self.serialized_name_to_node[output.as_tensor.name] + 
elif output.type == "as_sym_int": + return self.serialized_name_to_node[output.as_sym_int.as_name] + elif output.type == "as_sym_bool": + return self.serialized_name_to_node[output.as_sym_bool.as_name] + else: + raise SerializeError(f"Unable to deserialize output node {output}") + + def deserialize_graph(self, serialized_graph: Graph) -> torch.fx.Graph: + # Handle the tensor metas. + for name, tensor_value in serialized_graph.tensor_values.items(): + meta_val = self.deserialize_tensor_meta(tensor_value) + self.serialized_name_to_meta[name] = meta_val + + for name, sym_int_value in serialized_graph.sym_int_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_sym_int(sym_int_value) + + for name, sym_bool_value in serialized_graph.sym_bool_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_sym_bool(sym_bool_value) + + for name, script_obj_meta in serialized_graph.custom_obj_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_script_obj_meta(script_obj_meta) + + # Inputs: convert to placeholder nodes in FX. + for i, input_ in enumerate(serialized_graph.inputs): + if input_.type in ("as_tensor", "as_sym_int", "as_custom_obj"): + node_name = input_.value.name + placeholder_node = self.graph.placeholder(node_name) + self.sync_fx_node(node_name, placeholder_node) + elif input_.type in ("as_int", "as_float", "as_bool", "as_none", "as_string"): + node_name = f"arg{i}" + placeholder_node = self.graph.placeholder(node_name) + placeholder_node.meta["val"] = self.deserialize_input(input_) + else: + raise SerializeError(f"Invalid input type {input_}") + + # Nodes: convert to call_function nodes. + for serialized_node in serialized_graph.nodes: + try: + target = self.deserialize_operator(serialized_node.target) + self.deserialize_node(serialized_node, target) + + except Exception as e: + raise SerializeError(f"Failed deserializing node {serialized_node}") from e + + # Outputs: convert to a single `output` node. 
+ outputs = [] + for output in serialized_graph.outputs: + outputs.append(self.deserialize_graph_output(output)) + + if serialized_graph.is_single_tensor_return: + assert len(outputs) == 1 + outputs = outputs[0] # type: ignore[assignment] + else: + outputs = tuple(outputs) # type: ignore[assignment] + + output_node = self.graph.output(outputs) + + if serialized_graph.is_single_tensor_return: + output_node.meta["val"] = output_node.args[0].meta["val"] + else: + output_node.meta["val"] = tuple( + arg.meta["val"] for arg in output_node.args[0] + ) + + return self.graph + + def deserialize_node(self, serialized_node: Node, target: Callable) -> None: + if target in _SYM_BOOL_OPS or target in _SYM_INT_OPS: + name = serialized_node.outputs[0].value.as_name + args = self.deserialize_sym_op_inputs(serialized_node.inputs) + + fx_node = self.graph.create_node("call_function", target, args, {}, name) + self.deserialize_sym_op_outputs(serialized_node, fx_node) + + elif isinstance(target, torch._ops.HigherOrderOperator): + args, kwargs = self.deserialize_hoo_inputs(serialized_node.inputs) + # If HOP returns a single tensor, name the + # newly-created node after it. This ensures that these tensor values + # have names that are consistent with serialized. + # + # HOPs don't have schema yet, just check the output lengths and as_tensor attribute + name = ( + serialized_node.outputs[0].as_tensor.name + if len(serialized_node.outputs) == 1 and hasattr(serialized_node.outputs[0], "as_tensor") + else None + ) + fx_node = self.graph.create_node( + "call_function", target, args, kwargs, name + ) + self.deserialize_outputs(serialized_node, fx_node) + fx_node.meta.update(self.deserialize_metadata(serialized_node.metadata)) + + elif isinstance(target, torch._ops.OpOverload): + # For convenience: if this node returns a single tensor, name the + # newly-created node after it. This ensures that these tensor values + # have names that are consistent with serialized. 
+ name = ( + serialized_node.outputs[0].as_tensor.name + if _is_single_tensor_return(target) + else None # FX will generate a name for us. + ) + args, kwargs = self.deserialize_inputs(target, serialized_node) + fx_node = self.graph.create_node("call_function", target, args, kwargs, name) + self.deserialize_outputs(serialized_node, fx_node) + else: + raise SerializeError(f"Unsupported target type for node {serialized_node}: {target}") + + fx_node.meta.update(self.deserialize_metadata(serialized_node.metadata)) + + def deserialize_input_spec(self, i: InputSpec) -> ep.InputSpec: + if i.type == "user_input": + return ep.InputSpec( + kind=ep.InputKind.USER_INPUT, + arg=self.deserialize_argument_spec(i.user_input.arg), + target=None + ) + elif i.type == "parameter": + return ep.InputSpec( + kind=ep.InputKind.PARAMETER, + arg=ep.TensorArgument(name=i.parameter.arg.name), + target=i.parameter.parameter_name, + ) + elif i.type == "buffer": + return ep.InputSpec( + kind=ep.InputKind.BUFFER, + arg=ep.TensorArgument(name=i.buffer.arg.name), + target=i.buffer.buffer_name, + persistent=i.buffer.persistent, + ) + elif i.type == "tensor_constant": + return ep.InputSpec( + kind=ep.InputKind.CONSTANT_TENSOR, + arg=ep.TensorArgument(name=i.tensor_constant.arg.name), + target=i.tensor_constant.tensor_constant_name, + ) + elif i.type == "custom_obj": + return ep.InputSpec( + kind=ep.InputKind.CUSTOM_OBJ, + arg=ep.CustomObjArgument(name=i.custom_obj.arg.name, class_fqn=i.custom_obj.arg.class_fqn), + target=i.custom_obj.custom_obj_name, + ) + else: + raise AssertionError(f"Unknown input spec {i}") + + def deserialize_output_spec(self, o: OutputSpec) -> ep.OutputSpec: + if o.type == "user_output": + return ep.OutputSpec( + kind=ep.OutputKind.USER_OUTPUT, + arg=self.deserialize_argument_spec(o.user_output.arg), + target=None, + ) + elif o.type == "loss_output": + return ep.OutputSpec( + kind=ep.OutputKind.LOSS_OUTPUT, + arg=ep.TensorArgument(name=o.loss_output.arg.name), + target=None, + ) 
+ elif o.type == "buffer_mutation": + return ep.OutputSpec( + kind=ep.OutputKind.BUFFER_MUTATION, + arg=ep.TensorArgument(name=o.buffer_mutation.arg.name), + target=o.buffer_mutation.buffer_name + ) + elif o.type == "gradient_to_parameter": + return ep.OutputSpec( + kind=ep.OutputKind.GRADIENT_TO_PARAMETER, + arg=ep.TensorArgument(name=o.gradient_to_parameter.arg.name), + target=o.gradient_to_parameter.parameter_name + ) + elif o.type == "gradient_to_user_input": + return ep.OutputSpec( + kind=ep.OutputKind.GRADIENT_TO_USER_INPUT, + arg=ep.TensorArgument(name=o.gradient_to_user_input.arg.name), + target=o.gradient_to_user_input.user_input_name + ) + elif o.type == "user_input_mutation": + return ep.OutputSpec( + kind=ep.OutputKind.USER_INPUT_MUTATION, + arg=ep.TensorArgument(name=o.user_input_mutation.arg.name), + target=o.user_input_mutation.user_input_name + ) + else: + raise AssertionError(f"Unknown output spec {o}") + + def deserialize_signature(self, sig: GraphSignature) -> ep.ExportGraphSignature: + return ep.ExportGraphSignature( + input_specs=[self.deserialize_input_spec(i) for i in sig.input_specs], + output_specs=[self.deserialize_output_spec(o) for o in sig.output_specs] + ) + + def deserialize( + self, + serialized_graph_module: GraphModule, + serialized_state_dict: bytes, + constants: bytes, + symbol_name_to_range: Optional[Dict[str, symbolic_shapes.ValueRanges]] = None, + ) -> Result: + global _CURRENT_DESERIALIZER + assert _CURRENT_DESERIALIZER is None + _CURRENT_DESERIALIZER = self + try: + self.shape_env = symbolic_shapes.ShapeEnv(assume_static_by_default=True) + self.fake_tensor_mode = FakeTensorMode( + allow_fallback_kernels=False, + allow_non_fake_inputs=True, + shape_env=self.shape_env, + ) + self.symbol_name_to_symbol: Dict[str, sympy.Symbol] = {} + self.symbol_name_to_range = {} if symbol_name_to_range is None else symbol_name_to_range + self.signature = self.deserialize_signature(serialized_graph_module.signature) + self.constants = 
deserialize_torch_artifact(constants) + self.deserialize_graph(serialized_graph_module.graph) + + module_call_graph = self.deserialize_module_call_graph(serialized_graph_module.module_call_graph) + return GraphModuleDeserializer.Result( + graph_module=ep._create_graph_module_for_export(self.module, self.graph), + signature=self.signature, + module_call_graph=module_call_graph, + names_to_symbols=self.symbol_name_to_symbol, + state_dict=deserialize_torch_artifact(serialized_state_dict), + constants=self.constants, + ) + finally: + _CURRENT_DESERIALIZER = None + + def sync_fx_node(self, name: str, fx_node: torch.fx.Node): + if name in self.serialized_name_to_node: + raise SerializeError(f"Node {name} has already been deserialized before.") + self.serialized_name_to_node[name] = fx_node + assert "val" not in fx_node.meta + fx_node.meta["val"] = self.serialized_name_to_meta[name] + + def deserialize_sym_op_inputs(self, inputs): + return tuple(self.deserialize_input(input.arg) for input in inputs) + + def deserialize_inputs(self, target: torch._ops.OpOverload, serialized_node: Node): + schema_args = target._schema.arguments + actual_args = { + input.name: self.deserialize_input(input.arg) for input in serialized_node.inputs + } + args = [] + kwargs = {} + for schema_arg in schema_args: + is_positional = not schema_arg.has_default_value() and not schema_arg.kwarg_only + if is_positional: + args.append(actual_args[schema_arg.name]) + else: + if schema_arg.name in actual_args: + kwargs[schema_arg.name] = actual_args[schema_arg.name] + return tuple(args), kwargs + + def deserialize_hoo_inputs(self, inputs: List[NamedArgument]): + """ + For deserializing HOO inputs since HOOs do not have a schema. 
+ """ + args = [] + kwargs = {} + for input_ in inputs: + if input_.name != "": + kwargs[input_.name] = self.deserialize_input(input_.arg) + else: + args.append(self.deserialize_input(input_.arg)) + return (tuple(args), kwargs) + + def deserialize_input(self, inp: Argument) -> Any: + value = inp.value + typ_ = inp.type + if typ_ == "as_none": + # None should converted as None, but is encoded as bool in serialized + # Convert serialized object to torch equivalent + return None + elif typ_ == "as_tensor": + return self.serialized_name_to_node[inp.as_tensor.name] + elif typ_ == "as_scalar_type": + return _SERIALIZE_TO_TORCH_DTYPE[inp.as_scalar_type] + elif typ_ == "as_memory_format": + return _SERIALIZE_TO_TORCH_MEMORY_FORMAT[inp.as_memory_format] + elif typ_ == "as_layout": + return _SERIALIZE_TO_TORCH_LAYOUT[inp.as_layout] + elif typ_ == "as_graph": + assert isinstance(value, GraphArgument) + with self.save_graph_module(): + self.deserialize_graph(value.graph) + submodule = ep._create_graph_module_for_export(self.module, self.graph) + self.module.register_module(value.name, submodule) + return self.graph.create_node( + "get_attr", + value.name, + name=value.name, + ) + elif typ_ == "as_device": + return deserialize_device(inp.as_device) + elif typ_ == "as_int": + return inp.as_int + elif typ_ == "as_float": + return inp.as_float + elif typ_ == "as_bool": + return inp.as_bool + elif typ_ == "as_string": + return inp.as_string + elif typ_ == "as_sym_int": + return self.deserialize_sym_argument(inp.as_sym_int) + elif typ_ == "as_sym_bool": + return self.deserialize_sym_argument(inp.as_sym_bool) + elif isinstance(value, list): + if len(value) == 0: + return [] + elif typ_ == "as_tensors": + result = [] + for arg in value: + result.append(self.serialized_name_to_node[arg.name]) + return result + elif typ_ in ("as_ints", "as_floats", "as_bools", "as_strings"): + # convert from serialized.python.types.List to python list + return list(value) + elif typ_ in ("as_sym_ints", 
"as_sym_bools"): + return [self.deserialize_sym_argument(arg) for arg in value] + elif typ_ == "as_optional_tensors": + def deserialize_optional_tensor_args(a): + if a.type == "as_none": + return None + elif a.type == "as_tensor": + return self.serialized_name_to_node[a.value] + else: + raise SerializeError(f"Unhandled argument {inp}") + return list(map(deserialize_optional_tensor_args, value)) + else: + raise SerializeError(f"Unhandled argument {inp}") + elif typ_ == "as_custom_obj": + if inp.as_custom_obj.name in self.serialized_name_to_node: + # Custom object has been lifted as an input + return self.serialized_name_to_node[inp.as_custom_obj.name] + return self.constants[inp.as_custom_obj.name] + elif typ_ == "as_operator": + return self.deserialize_operator(inp.as_operator) + else: + raise SerializeError(f"Unhandled argument {inp}") + + def deserialize_sym_argument(self, sym_arg): + if isinstance(sym_arg, SymIntArgument): + if sym_arg.type == "as_int": + return sym_arg.as_int + elif sym_arg.type == "as_name": + return self.serialized_name_to_node[sym_arg.as_name] + elif isinstance(sym_arg, SymBoolArgument): + if sym_arg.type == "as_bool": + return sym_arg.as_bool + elif sym_arg.type == "as_name": + return self.serialized_name_to_node[sym_arg.as_name] + raise SerializeError(f"Unknown symbolic argument type: {sym_arg}") + + def deserialize_sym_op_outputs(self, serialized_node: Node, fx_node: torch.fx.Node): + self.sync_fx_node(serialized_node.outputs[0].value.as_name, fx_node) + + def deserialize_outputs(self, serialized_node: Node, fx_node: torch.fx.Node): + # Check single value return + if len(serialized_node.outputs) == 0: + return + if ( + len(serialized_node.outputs) == 1 + and serialized_node.outputs[0].type == "as_tensor" + ): + self.sync_fx_node(serialized_node.outputs[0].as_tensor.name, fx_node) + return + elif ( + len(serialized_node.outputs) == 1 and + isinstance(serialized_node.outputs[0].value, (SymIntArgument, SymBoolArgument)) + ): + 
self.sync_fx_node(serialized_node.outputs[0].value.as_name, fx_node) + return + + self.deserialize_multiple_outputs(serialized_node, fx_node) + + def deserialize_multiple_outputs(self, serialized_node: Node, fx_node: torch.fx.Node) -> None: + deserialized_metadata = self.deserialize_metadata(serialized_node.metadata) + + def generate_getitem(meta_val, fx_node: torch.fx.Node, arg: Union[TensorArgument, SymIntArgument], idx: int): + if isinstance(arg, TensorArgument): + name = arg.name + elif isinstance(arg, SymIntArgument): + name = arg.as_name + else: + raise AssertionError(f"generate_getitem got unknown argument type {type(arg)}") + individual_output = self.graph.create_node( + "call_function", + operator.getitem, + (fx_node, idx), + name=name, + ) + self.sync_fx_node(name, individual_output) + meta_val.append(self.serialized_name_to_meta[name]) + # The derived `getitem` nodes should have the same stacktrace as the + # original `fx_node` + individual_output.meta.update(deserialized_metadata) + + def generate_getitems(meta_val, fx_node: torch.fx.Node, args): + for idx, arg in enumerate(args): + if isinstance(arg, Argument): + arg = arg.value + if isinstance(arg, (TensorArgument, SymIntArgument)): + generate_getitem(meta_val, fx_node, arg, idx) + elif isinstance(arg, (list, tuple)): + list_output = self.graph.create_node( + "call_function", + operator.getitem, + (fx_node, idx), + ) + meta_val.append([]) + generate_getitems(meta_val[-1], list_output, arg) + list_output.meta.update(deserialized_metadata) + list_output.meta['val'] = meta_val[-1] + else: + raise NotImplementedError(f"Unimplemented node output type: {arg}") + + # Convert multiple return types to FX format. + # In FX, each node only returns one value. So in order to represent + # multiple return values, we have to emit a `getitem` node for each + # return value. 
+ # This performs the inverse mapping of the `serialize_outputs` call in + # serialization, see [NOTE: Multiple outputs] + meta_val: List[Any] = [] + if len(serialized_node.outputs) == 1: + assert isinstance(serialized_node.outputs[0].value, list) + assert isinstance(serialized_node.outputs[0].value[0], TensorArgument) + generate_getitems(meta_val, fx_node, serialized_node.outputs[0].as_tensors) + else: + generate_getitems(meta_val, fx_node, serialized_node.outputs) + + # also update the metaval for `fx_node` to be a list(meta) + fx_node.meta["val"] = tuple(meta_val) + self.serialized_name_to_node[fx_node.name] = fx_node + + def deserialize_metadata(self, metadata: Dict[str, str]) -> Dict[str, Any]: + ret: Dict[str, Any] = {} + if stack_trace := metadata.get("stack_trace"): + ret["stack_trace"] = stack_trace + + def deserialize_meta_func(serialized_target: str): + module = None + if serialized_target.startswith("torch.nn"): + module = torch.nn + serialized_target_names = serialized_target.split(".")[2:] + elif serialized_target.startswith("torch"): + module = torch + serialized_target_names = serialized_target.split(".")[1:] + else: + return self.deserialize_operator(serialized_target) + + target = module + for name in serialized_target_names: + if not hasattr(target, name): + return serialized_target + else: + target = getattr(target, name) + return target + + if nn_module_stack_str := metadata.get("nn_module_stack"): + # Originally serialized to "key,orig_path,type_str" + def import_nn_module_stack(key, path, ty): + return key, (path, ty) + nn_module_stack = dict( + import_nn_module_stack(*item.split(",")) + for item in nn_module_stack_str.split(ST_DELIMITER) + ) + ret["nn_module_stack"] = nn_module_stack + + if source_fn_st_str := metadata.get("source_fn_stack"): + # Originally serializes to "fx_node_name,op_str" + source_fn_st = [] + for source_fn_str in source_fn_st_str.split(ST_DELIMITER): + name, target_str = source_fn_str.split(",") + 
source_fn_st.append((name, deserialize_meta_func(target_str))) + ret["source_fn_stack"] = source_fn_st + return ret + + def deserialize_argument_spec(self, x: Argument) -> ep.ArgumentSpec: + if x.type == "as_tensor": + return ep.TensorArgument(name=x.as_tensor.name) + elif x.type == "as_sym_int": + return ep.SymIntArgument(name=x.as_sym_int.as_name) + else: + return ep.ConstantArgument(value=self.deserialize_input(x)) + + def deserialize_module_call_signature(self, module_call_signature: ModuleCallSignature) -> ep.ModuleCallSignature: + return ep.ModuleCallSignature( + inputs=[self.deserialize_argument_spec(x) for x in module_call_signature.inputs], + outputs=[self.deserialize_argument_spec(x) for x in module_call_signature.outputs], + in_spec=treespec_loads(module_call_signature.in_spec), + out_spec=treespec_loads(module_call_signature.out_spec), + ) + + def deserialize_module_call_graph(self, module_call_graph: List[ModuleCallEntry]) -> List[ep.ModuleCallEntry]: + return [ + ep.ModuleCallEntry( + fqn=entry.fqn, + signature=self.deserialize_module_call_signature(entry.signature) if entry.signature else None, + ) for entry in module_call_graph + ] + + +class ExportedProgramDeserializer: + def __init__(self, expected_opset_version: Optional[Dict[str, int]] = None): + self.expected_opset_version: Dict[str, int] = {} + if expected_opset_version: + self.expected_opset_version.update(expected_opset_version) + if "aten" not in self.expected_opset_version: + self.expected_opset_version["aten"] = torch._C._get_max_operator_version() + + def deserialize_range_constraints( + self, + symbol_name_to_range: Dict[str, symbolic_shapes.ValueRanges], + symbol_name_to_symbol: Dict[str, sympy.Symbol], + ) -> Dict[sympy.Symbol, ValueRanges]: + range_constraints = {} + for k, v in symbol_name_to_range.items(): + if symbol := symbol_name_to_symbol.get(k): + range_constraints[symbol] = v # type: ignore[arg-type] + else: + log.warning(f"Symbol {k} did not appear in the graph that was 
deserialized") # noqa: G004 + return range_constraints + + def deserialize( + self, serialized_artifact: SerializedArtifact + ) -> ep.ExportedProgram: + assert isinstance(serialized_artifact.exported_program, ExportedProgram) + + if serialized_artifact.exported_program.schema_version.major != SCHEMA_VERSION[0]: + raise SerializeError( + f"Serialized schema version {serialized_artifact.exported_program.schema_version} " + f"does not match our current schema version {SCHEMA_VERSION}." + ) + + symbol_name_to_range = { + k: symbolic_shapes.ValueRanges(_int_to_sympy_int(v.min_val), _int_to_sympy_int(v.max_val)) + for k, v in serialized_artifact.exported_program.range_constraints.items() + } + res = ( + GraphModuleDeserializer() + .deserialize( + serialized_artifact.exported_program.graph_module, + serialized_artifact.state_dict, + serialized_artifact.constants, + symbol_name_to_range, + ) + ) + range_constraints = self.deserialize_range_constraints( + symbol_name_to_range, res.names_to_symbols, + ) + model_opset_version: Optional[Dict[str, int]] = serialized_artifact.exported_program.opset_version + self._validate_model_opset_version(model_opset_version) + + upgrader = GraphModuleOpUpgrader(self.expected_opset_version, model_opset_version) + + exported_program = ep.ExportedProgram( + root=res.graph_module, + graph=res.graph_module.graph, + graph_signature=res.signature, + state_dict=res.state_dict, # type: ignore[arg-type] + range_constraints=range_constraints, + module_call_graph=res.module_call_graph, + example_inputs=None, + verifier=load_verifier(serialized_artifact.exported_program.dialect), + constants=res.constants, + ) + return upgrader.upgrade(exported_program) + + def _validate_model_opset_version(self, model_opset_version: Optional[Dict[str, int]]): + """Compare model_opset_version with expected_opset_version and raise error if we can't resolve the version + difference. 
+ E.g., model_opset_version = {"aten": 3, "custom": 4} + expected_opset_version = {"aten": 4, "custom": 4} + This means we can use an upgrader for ATen to reconcile the deserialized model. + + The logic of this method: + + For common op namespaces: + 1. if model version < expected version, this case can be handled by upgraders. + 2. if model version > expected version, we need downgraders but not implemented yet. + 3. if model version == expected version, we don't need extra handling. + + For op namespace only in model_opset_version, we should give a warning because it is missing from + expected_opset_version. + """ + if not model_opset_version: + raise RuntimeError("Serialized model should have opset version.") + common_namespaces = {key for key in model_opset_version if key in self.expected_opset_version} + for namespace in common_namespaces: + assert ( + isinstance(model_version := model_opset_version[namespace], int) + ), f"model_opset_version value should be int, got {model_opset_version[namespace]}" + + assert ( + isinstance(compiler_version := self.expected_opset_version[namespace], int) + ), f"expected_opset_version value should be int, got {self.expected_opset_version[namespace]}" + + # TODO(larryliu0820): Add support for upgrader & downgrader + if model_version != compiler_version: + raise NotImplementedError( + f"Model opset version {model_opset_version} doesn't match to compiler opset version " + f"{self.expected_opset_version}! Upgrader/downgrader is not implemented yet." + ) + for namespace in model_opset_version: + if namespace in common_namespaces: + continue + log.warning("Compiler doesn't have a version table for op namespace: {ns}. 
", extra={"ns": namespace}) + + +class EnumEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, bytes): + return base64.b64encode(obj).decode('utf-8') + return super().default(obj) + + +def _dataclass_to_dict(obj): + if isinstance(obj, _Union): + return {obj.type: _dataclass_to_dict(obj.value)} + elif dataclasses.is_dataclass(obj): + return { + f.name: _dataclass_to_dict(getattr(obj, f.name)) + for f in dataclasses.fields(obj) + if not (f.default is None and getattr(obj, f.name) is None) + } + elif isinstance(obj, list): + return [_dataclass_to_dict(x) for x in obj] + elif isinstance(obj, tuple): + return tuple(_dataclass_to_dict(x) for x in obj) + elif isinstance(obj, dict): + return {k: _dataclass_to_dict(v) for k, v in obj.items()} + else: + return obj + + +def serialize( + exported_program: ep.ExportedProgram, + opset_version: Optional[Dict[str, int]] = None, +) -> SerializedArtifact: + serialized_artifact = ( + ExportedProgramSerializer(opset_version).serialize(exported_program) + ) + assert isinstance(serialized_artifact.exported_program, ExportedProgram) + + + json_program = json.dumps( + _dataclass_to_dict(serialized_artifact.exported_program), cls=EnumEncoder + ) + json_bytes = json_program.encode('utf-8') + artifact = SerializedArtifact( + json_bytes, + serialized_artifact.state_dict, + serialized_artifact.constants + ) + return artifact + + +def _dict_to_dataclass(cls, data): + assert not isinstance(cls, str), f"Unresolved class type: '{cls}'." 
    # Optional[T] is Union[T, None]: unwrap to T (or return None) first.
    if typing.get_origin(cls) == typing.Union and type(None) in typing.get_args(cls):
        if data is None:
            return None
        ty_args = typing.get_args(cls)
        assert len(ty_args) == 2
        return _dict_to_dataclass(ty_args[0], data)
    elif isinstance(cls, type) and issubclass(cls, _Union):
        # Serialized unions are single-entry dicts: {tag: value}.
        assert isinstance(data, dict)
        assert len(data) == 1
        _type = next(iter(data.keys()))
        _value = next(iter(data.values()))
        assert isinstance(_type, str)
        field_type = cls.__annotations__[_type]
        return cls.create(**{_type: _dict_to_dataclass(field_type, _value)})
    elif dataclasses.is_dataclass(cls):
        # Construct the dataclass, then recursively decode each field using
        # its resolved type hint.
        obj = cls(**data)  # type: ignore[assignment]
        type_hints = typing.get_type_hints(cls)
        for f in dataclasses.fields(cls):
            name = f.name
            new_field_obj = _dict_to_dataclass(type_hints[name], getattr(obj, name))
            setattr(obj, name, new_field_obj)
        return obj
    elif isinstance(data, list):
        if len(data) == 0:
            return data
        # cls is List[T]; decode each element as T.
        d_type = typing.get_args(cls)[0]
        return [
            _dict_to_dataclass(d_type, d)
            for d in data
        ]
    elif isinstance(data, dict):
        # cls is Dict[K, V]; keys stay as-is, values decoded as V.
        v_type = typing.get_args(cls)[1]
        return {
            k: _dict_to_dataclass(v_type, v)
            for k, v in data.items()
        }
    return data


def deserialize(
    artifact: SerializedArtifact,
    expected_opset_version: Optional[Dict[str, int]] = None,
) -> ep.ExportedProgram:
    """Inverse of `serialize`: JSON bytes -> schema dataclasses -> ExportedProgram."""
    assert isinstance(artifact.exported_program, bytes)
    exported_program_str = artifact.exported_program.decode('utf-8')
    exported_program_dict = json.loads(exported_program_str)
    serialized_exported_program = _dict_to_dataclass(ExportedProgram, exported_program_dict)
    return (
        ExportedProgramDeserializer(expected_opset_version)
        .deserialize(
            SerializedArtifact(
                serialized_exported_program,
                artifact.state_dict,
                artifact.constants
            )
        )
    )


def _canonicalize_graph(sorted_inputs, sorted_outputs, graph) -> Tuple[Graph, Dict[str, str]]:
    def _get_argument(a: Argument):
        # Only name-carrying argument kinds participate in canonicalization;
        # every constant-valued kind maps to None.
        if a.type == "as_none":
            return None
        elif a.type == "as_tensor":
            return a.as_tensor
        elif a.type == "as_tensors":
            return a.as_tensors
        elif a.type == "as_int":
            return None
        elif a.type == "as_ints":
            return None
        elif a.type == "as_float":
            return None
        elif a.type == "as_floats":
            return None
        elif a.type == "as_string":
            return None
        elif a.type == "as_strings":
            return None
        elif a.type == "as_sym_int":
            return a.as_sym_int
        elif a.type == "as_sym_ints":
            return a.as_sym_ints
        elif a.type == "as_scalar_type":
            return None
        elif a.type == "as_memory_format":
            return None
        elif a.type == "as_layout":
            return None
        elif a.type == "as_device":
            return None
        elif a.type == "as_bool":
            return None
        elif a.type == "as_bools":
            return None
        elif a.type == "as_sym_bool":
            return a.as_sym_bool
        elif a.type == "as_sym_bools":
            return a.as_sym_bools
        elif a.type == "as_graph":
            return None
        elif a.type == "as_optional_tensors":
            return a.as_optional_tensors
        elif a.type == "as_custom_obj":
            return None
        elif a.type == "as_operator":
            return None
        else:
            raise AssertionError(f"Unknown input type to the ExportedProgram: {a}")

    # Stage 1: Reorder named items.
+ def for_args(f, a): + assert isinstance(a, Argument) + pytree.tree_map(f, _get_argument(a)) + + def sort_nodes(nodes): + @dataclass + class Edges: + outs: List[int] + ins: int + + graph_inputs: Set[str] = set() + def_table: Dict[str, int] = {} + edges: Dict[int, Edges] = {} + candidates: List[Tuple[str, List[Tuple[str, List[int]]], int]] = [] + rank: Dict[str, int] = {} + ret: List[Node] = [] + + def get_name(a) -> Optional[str]: + if a is None: + return None + if isinstance(a, TensorArgument): + return a.name + elif isinstance(a, (SymIntArgument, SymBoolArgument)): + if a.type == "as_name": + return a.as_name + elif a.type in ("as_int", "as_bool"): + return None + else: + raise AssertionError(f"Unknown argument type: {a}") + elif isinstance(a, OptionalTensorArgument): + if a.type == "as_tensor": + assert isinstance(a.as_tensor, str) + return a.as_tensor + elif a.type == "as_none": + return None + else: + raise AssertionError(f"Unknown optional tensor type: {a}") + else: + raise AssertionError(f"Unknown argument type: {a}") + + for i in sorted_inputs: + def add_input(a): + if s := get_name(a): + graph_inputs.add(s) + + for_args(add_input , i) + + for idx, node in enumerate(nodes): + def add_def(a): + if s := get_name(a): + assert s not in def_table + def_table[s] = idx + + for o in node.outputs: + for_args(add_def, o) + + edges[idx] = Edges([], 0) + + for idx, user in enumerate(nodes): + def add_edge(a): + if s := get_name(a): + if s not in def_table: + assert s in graph_inputs + return + src = def_table[s] + edges[src].outs.append(idx) + edges[idx].ins += 1 + + for i in user.inputs: + for_args(add_edge, i.arg) + + def add_rank(a): + if s := get_name(a): + assert s not in rank + rank[s] = len(rank) + + def get_rank(a): + if s := get_name(a): + return rank[s] + else: + return -1 + + for i in sorted_inputs: + for_args(add_rank, i) + + def add_candidate(idx: int): + def get_ranks(i): + ranks = [] + for_args(lambda x: ranks.append(get_rank(x)), i) + return ranks + 
node = nodes[idx] + args_rank = [(a.name, get_ranks(a.arg)) for a in node.inputs] + heapq.heappush(candidates, (node.target, args_rank, idx)) + + for idx, e in edges.items(): + if e.ins == 0: + add_candidate(idx) + + while len(candidates) > 0: + _, _, idx = heapq.heappop(candidates) + node = nodes[idx] + for o in node.outputs: + for_args(add_rank, o) + ret.append(node) + assert idx in edges + for user in edges[idx].outs: + e = edges[user] + assert e.ins > 0 + e.ins -= 1 + if e.ins == 0: + add_candidate(user) + edges[idx].outs.clear() + + return ret + + sorted_nodes = sort_nodes(graph.nodes) + assert len(sorted_nodes) == len(graph.nodes) + + # Stage 2: Rename nodes. + name_table: Dict[str, str] = {} + + def rename_def(a): + def _rename(arg_name, values): + new_name = f"_{len(name_table)}" + assert arg_name not in name_table + name_table[arg_name] = new_name + assert arg_name in values + values[new_name] = values.pop(arg_name) + return new_name + + if a is None: + return + if isinstance(a, TensorArgument): + a.name = _rename(a.name, graph.tensor_values) + elif isinstance(a, SymIntArgument): + if a.type == "as_name": + a.as_name = _rename(a.as_name, graph.sym_int_values) + elif isinstance(a, SymBoolArgument): + if a.type == "as_name": + a.as_name = _rename(a.as_name, graph.sym_bool_values) + else: + raise AssertionError(f"Unknown argument type: {a}") + + def replace_use(a): + if a is None: + return + if isinstance(a, TensorArgument): + a.name = name_table.get(a.name, a.name) + elif isinstance(a, SymIntArgument): + if a.type == "as_name": + a.as_name = name_table.get(a.as_name, a.as_name) + elif isinstance(a, SymBoolArgument): + if a.type == "as_name": + a.as_name = name_table.get(a.as_name, a.as_name) + elif isinstance(a, OptionalTensorArgument): + if a.type == "as_tensor": + assert isinstance(a.as_tensor, str) + a.as_tensor = name_table.get(a.as_tensor, a.as_tensor) + else: + raise AssertionError(f"Unknown argument type: {a}") + + for i in sorted_inputs: + 
for_args(rename_def, i) + + for n in sorted_nodes: + for o in n.outputs: + for_args(rename_def, o) + + for n in sorted_nodes: + for i in n.inputs: + for_args(replace_use, i.arg) + + for o in sorted_outputs: + for_args(replace_use, o) + + # Stage 3: Remove unstable fields. + for n in sorted_nodes: + n.metadata.clear() + + # Stage 4: Aggregate values. + sorted_tensor_values = dict(sorted(graph.tensor_values.items(), key=lambda x: x[0])) + sorted_sym_int_values = dict(sorted(graph.sym_int_values.items(), key=lambda x: x[0])) + sorted_sym_bool_values = dict(sorted(graph.sym_bool_values.items(), key=lambda x: x[0])) + + # Stage 5: Recurse in subgraphs. + counter = 0 + for node in sorted_nodes: + for i in node.inputs: + a = i.arg + if a.type == "as_graph": + a.as_graph.graph = _canonicalize_graph( + a.as_graph.graph.inputs, + a.as_graph.graph.outputs, + a.as_graph.graph + ) + a.as_graph.name = f"_g{counter}" + counter += 1 + + graph = Graph( + inputs=sorted_inputs, + outputs=sorted_outputs, + nodes=sorted_nodes, + tensor_values=sorted_tensor_values, + sym_int_values=sorted_sym_int_values, + sym_bool_values=sorted_sym_bool_values, + is_single_tensor_return=graph.is_single_tensor_return, + ) + return graph, name_table + + +def canonicalize(ep: ExportedProgram) -> ExportedProgram: + """ + Normalize a serialized ExportedProgram, so that different eager program which + shares the same semantics can get a single representation on disk. + + This function canonicalizes an ExportedProgram by: + + 1. Sorting nodes in topological order. + 2. Rename nodes to have unique names. + 3. Remove unstable fields. + 4. Aggregate the above program fields. + 5. Recurse in subgraphs. + + Args: + ep (ExportedProgram): The ExportedProgram to canonicalize. + + Returns: + ExportedProgram: The canonicalized exported program. 
+ """ + ep = copy.deepcopy(ep) + + opset_version = dict(sorted(ep.opset_version.items(), key=lambda x: x[0])) + range_constraints = dict(sorted(ep.range_constraints.items(), key=lambda x: x[0])) + module_call_graph = sorted(ep.graph_module.module_call_graph, key=lambda x: x.fqn) + signature = ep.graph_module.signature + graph = ep.graph_module.graph + + assert len(graph.inputs) == len(signature.input_specs) + assert len(graph.outputs) == len(signature.output_specs) + + def rank_input(inp) -> Tuple[int, Optional[str], int]: + idx, (arg, spec) = inp + assert isinstance(spec, InputSpec) + if spec.type == "user_input": + return 5, None, idx + elif spec.type == "parameter": + return 1, spec.parameter.parameter_name, idx + elif spec.type == "buffer": + return 2, spec.buffer.buffer_name, idx + elif spec.type == "tensor_constant": + return 3, spec.tensor_constant.tensor_constant_name, idx + elif spec.type == "custom_obj": + return 4, spec.custom_obj.custom_obj_name, idx + else: + raise AssertionError(f"Unknown input type: {spec}") + + def rank_output(out) -> Tuple[int, Optional[str], int]: + idx, (arg, spec) = out + assert isinstance(spec, OutputSpec) + if spec.type == "user_output": + return 3, None, idx + elif spec.type == "loss_output": + return 3, None, idx + elif spec.type == "buffer_mutation": + return 1, spec.buffer_mutation.buffer_name, idx + elif spec.type == "gradient_to_parameter": + return 4, spec.gradient_to_parameter.parameter_name, idx + elif spec.type == "gradient_to_user_input": + return 5, None, idx + elif spec.type == "user_input_mutation": + return 2, None, idx + else: + raise AssertionError(f"Unknown output type: {spec}") + + sorted_ins = sorted(enumerate(zip(graph.inputs, signature.input_specs)), key=rank_input) + sorted_inputs, input_specs = zip(*(i for idx, i in sorted_ins)) # type: ignore[assignment] + + sorted_outs = sorted(enumerate(zip(graph.outputs, signature.output_specs)), key=rank_output) + sorted_outputs, output_specs = zip(*(i for idx, i 
in sorted_outs)) # type: ignore[assignment] + + sorted_graph, replace_table = _canonicalize_graph(sorted_inputs, sorted_outputs, graph) + + def replace_input(inp): + assert isinstance(spec, InputSpec) + if spec.type == "user_input": + arg = spec.user_input.arg + if arg.type == "as_tensor": + t = arg.as_tensor + t.name = replace_table[t.name] + elif arg.type == "as_sym_int": + s = arg.as_sym_int + if s.type == "as_name": + s.as_name = replace_table[s.as_name] + elif s.type == "as_int": + pass + else: + raise AssertionError(f"Unknown sym_int type: {s}") + elif arg.type in ("as_none", "as_int", "as_float", "as_string", "as_custom_obj"): + return + else: + raise AssertionError(f"Unknown input type: {arg}") + elif spec.type == "parameter": + t = spec.parameter.arg + t.name = replace_table[t.name] + elif spec.type == "buffer": + t = spec.buffer.arg + t.name = replace_table[t.name] + elif spec.type == "tensor_constant": + t = spec.tensor_constant.arg + t.name = replace_table[t.name] + elif spec.type == "custom_obj": + return + else: + raise AssertionError(f"Unknown input type: {spec}") + + def replace_output(out): + assert isinstance(spec, OutputSpec) + if spec.type == "user_output": + arg = spec.user_output.arg + if arg.type == "as_tensor": + t = arg.as_tensor + t.name = replace_table[t.name] + elif arg.type == "as_sym_int": + s = arg.as_sym_int + if s.type == "as_name": + s.as_name = replace_table[s.as_name] + elif s.type == "as_int": + pass + else: + raise AssertionError(f"Unknown sym_int type: {s}") + elif arg.type in ("as_none", "as_int", "as_float", "as_string"): + return + else: + raise AssertionError(f"Unknown input type: {arg}") + elif spec.type == "loss_output": + t = spec.loss_output.arg + t.name = replace_table[t.name] + elif spec.type == "buffer_mutation": + t = spec.buffer_mutation.arg + t.name = replace_table[t.name] + elif spec.type == "gradient_to_parameter": + t = spec.gradient_to_parameter.arg + t.name = replace_table[t.name] + elif spec.type == 
"gradient_to_user_input": + g = spec.gradient_to_user_input + g.arg.name = replace_table[g.arg.name] + g.user_input_name = replace_table[g.user_input_name] + elif spec.type == "user_input_mutation": + u = spec.user_input_mutation + u.arg.name = replace_table[u.arg.name] + u.user_input_name = replace_table[u.user_input_name] + else: + raise AssertionError(f"Unknown output type: {spec}") + + for spec in input_specs: + replace_input(spec) + + for spec in output_specs: + replace_output(spec) + + return ExportedProgram( + graph_module=GraphModule( + graph=sorted_graph, + signature=GraphSignature( + input_specs=list(input_specs), + output_specs=list(output_specs), + ), + module_call_graph=module_call_graph, + ), + opset_version=opset_version, + range_constraints=range_constraints, + schema_version=ep.schema_version, + dialect=ep.dialect, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/union.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/union.py new file mode 100644 index 0000000000000000000000000000000000000000..8dfce61f0ab215932e08f4dbc180d36fa08c7a9b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/union.py @@ -0,0 +1,69 @@ +import functools +from dataclasses import fields +from typing import Hashable, Set + + +class _UnionTag(str): + _cls: Hashable + + @staticmethod + def create(t, cls): + tag = _UnionTag(t) + assert not hasattr(tag, "_cls") + tag._cls = cls + return tag + + def __eq__(self, cmp) -> bool: + assert isinstance(cmp, str) + other = str(cmp) + assert other in _get_field_names( + self._cls + ), f"{other} is not a valid tag for {self._cls}. 
Available tags: {_get_field_names(self._cls)}" + return str(self) == other + + def __hash__(self): + return hash(str(self)) + + +@functools.lru_cache(maxsize=None) +def _get_field_names(cls) -> Set[str]: + return {f.name for f in fields(cls)} + + +class _Union: + _type: _UnionTag + + @classmethod + def create(cls, **kwargs): + assert len(kwargs) == 1 + obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type] + obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls) + return obj + + def __post_init__(self): + assert not any(f.name in ("type", "_type", "create", "value") for f in fields(self)) # type: ignore[arg-type, misc] + + @property + def type(self) -> str: + try: + return self._type + except AttributeError as e: + raise RuntimeError( + f"Please use {type(self).__name__}.create to instantiate the union type." + ) from e + + @property + def value(self): + return getattr(self, self.type) + + def __getattribute__(self, name): + attr = super().__getattribute__(name) + if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type] + raise AttributeError(f"Field {name} is not set.") + return attr + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return f"{type(self).__name__}({self.type}={getattr(self, self.type)})" diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/upgrade.py b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/upgrade.py new file mode 100644 index 0000000000000000000000000000000000000000..c34917f3dd074cf50e3ab2e030f9730c3d4333a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_export/serde/upgrade.py @@ -0,0 +1,201 @@ +import logging +from collections import defaultdict +from typing import Tuple, Dict, Optional, List + +import torch +from torch.export import export +from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse +from torch._export.pass_infra.node_metadata import NodeMetadata 
+from torch._export.pass_infra.proxy_value import ProxyValue +from torch._subclasses import FakeTensor +from torch.fx.node import Target, Argument +from torch.library import Library +from torch.utils._pytree import tree_unflatten +import torch._export.exported_program as ep +import re + +lib = Library("aten", "FRAGMENT") +impl_lib = Library("aten", "IMPL") + +log = logging.getLogger(__name__) + + +def get_target_version(versioned_upgrader_name: str) -> int: + """div_Scalar_0_3 is the name of the upgrader, meaning it applies to div.Scalar of version 0 to 3 and is + upgrading to version 4.""" + if not re.match("^.*_[0-9]+_[0-9]+$", versioned_upgrader_name): + raise RuntimeError(f"Upgrader name {versioned_upgrader_name} is invalid") + + return int(versioned_upgrader_name.split('_')[-1]) + 1 + + +def get_upgraders() -> Dict[str, Tuple[str, str]]: + """Getting upgraders entry map and operator version map and merge them into one dict.""" + upgraders = torch._C._get_upgraders_entry_map() + op_version_map = torch._C._get_operator_version_map() + output: Dict[str, Tuple[str, str]] = defaultdict(tuple) # type: ignore[arg-type] + for opname, entry_list in op_version_map.items(): + if not entry_list: + raise RuntimeError(f"Op version map has an empty entry for opname {opname}") + entry = entry_list[0] + old_schema = entry.old_schema + upgrader_name = entry.upgrader_name + upgrader_str = upgraders.get(upgrader_name, None) + if not upgrader_str: + raise RuntimeError(f"Can't find upgrader for op {opname} and upgrader name {upgrader_name}") + output[upgrader_name] = (old_schema, upgrader_str) + return output + + +class GraphModuleOpUpgrader: + """This upgrader is able to upgrade the old version of ops in a given GraphModule, if all upgraders are available. + To use it, retrieve upgraders from somewhere (TorchScript API or new API) and pass it into this upgrader. In + __init__() it does the following: + 1. parse the upgrader list and reorder for upgrading purpose. + 2. 
register old versions of operators as custom ops. + 3. prepare upgrader passes. + + In `upgrade()` API run these upgrader passes. + + An example of op_upgraders input: + { + "aten::div__Scalar_0_3": ( # versioned op name + "div._Scalar(self: Tensor, other: Scalar)", # old schema + ''' + def div__Scalar_0_3(self: torch.Tensor, other) -> torch.Tensor: # upgrader in literal string + if (self.is_floating_point() or isinstance(other, float)): + return self.true_divide_(other) + return self.divide_(other, rounding_mode='trunc') + ''', + ), + }, + + Note that we require the upgrader function to be runnable in Python (which is a stricter requirement than the + original TorchScript upgrader). + """ + + class UpgraderPass(_ExportPassBaseDeprecatedDoNotUse): + def __init__(self, old_target: Target, new_target: Target): + super().__init__() + self.old_target = old_target + self.new_target = new_target + + def call_operator( + self, + op, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + meta: NodeMetadata, + ) -> ProxyValue: + if op == self.old_target: + return super().call_operator(self.new_target, args, kwargs, meta) + return super().call_operator(op, args, kwargs, meta) + + def __init__( + self, + compiler_opset_version: Optional[Dict[str, int]] = None, + model_opset_version: Optional[Dict[str, int]] = None, + op_upgraders: Optional[Dict[str, Tuple[str, str]]] = None, + ): + self.op_upgraders: Dict[str, Tuple[str, str]] = get_upgraders() if not op_upgraders else op_upgraders + self.compiler_opset_version = compiler_opset_version if compiler_opset_version else {} + self.model_opset_version = model_opset_version if model_opset_version else {} + self.upgrader_passes: List[GraphModuleOpUpgrader.UpgraderPass] = GraphModuleOpUpgrader._populate_passes( + self._parse_upgraders(self.op_upgraders)) + + def _parse_upgraders(self, op_upgraders: Optional[Dict[str, Tuple[str, str]]] = None) -> List[Tuple[str, str]]: + """Reorder op_upgraders by version number, return an 
ordered list of tuples, containing old op schema as well + as the upgrader function string literal.""" + # TODO(larryliu0820): Add support for custom ops + op_namespace = "aten" + if not op_upgraders or op_namespace not in self.model_opset_version or op_namespace not in self.compiler_opset_version: + return [] + model_ver = self.model_opset_version[op_namespace] + curr_ver = self.compiler_opset_version[op_namespace] + + # key is the target version. div__Scalar_0_3 should have a key of 4. + versioned_upgraders: Dict[int, Tuple[str, str]] = {get_target_version(name): v for name, v in + op_upgraders.items()} + target_upgraders: List[Tuple[str, str]] = [] + # we need all upgraders from model_ver + 1 to curr_ver, inclusively + for ver in range(model_ver + 1, curr_ver + 1): + if ver in versioned_upgraders: + target_upgraders.append(versioned_upgraders[ver]) + else: + # we may be able to get away with missing upgraders, if that operator is missing from given graph + # module. + log.warning("Missing an upgrader to upgrade to version {ver}.", extra={"ver": ver}) + + return target_upgraders + + @staticmethod + def _populate_passes(upgraders: List[Tuple[str, str]]) -> List[UpgraderPass]: + """Given a list of upgraders, loop through it from lower version to higher version and create passes for all + upgraders. se torch.Library API to register old ops. Op name will be + __. Register upgraders as CompositeImplicitAutograd kernels. For example: + + lib = Library("aten", "FRAGMENT") + lib.define(old_schema) + + impl_lib = Library("aten", "IMPL") + impl_lib.impl("div__Scalar_0_3", div__Scalar_0_3, "CompositeImplicitAutograd") + + @:var upgraders: a list of tuples. The first element of the tuple is the old schema and the second is the + upgrader function literal text. 
+ @:return upgrader passes, order matters + """ + + upgrader_passes = [] + + def register_old_op(name: str, schema: str, impl_str: str): + """Registers an old version operator using impl_name as old op name.""" + lib.define(schema) + try: + exec(impl_str) + except Exception as e: + raise RuntimeError(f"Invalid upgrader string: {impl_str}") from e + impl_lib.impl(name, locals()[name], "CompositeImplicitAutograd") + + for (schema, upgrader_str) in upgraders: + upgrader_name = upgrader_str.split('(')[0].split(' ')[-1] + op_name = schema.split('(')[0].split("::")[-1] + schema = schema.replace(op_name, upgrader_name) + try: + register_old_op(name=upgrader_name, schema=schema, impl_str=upgrader_str) + except RuntimeError as e: + if "with the same name and overload name multiple times" in str(e): + print(f"Registering {upgrader_name} multiple times") + else: + raise RuntimeError from e + old_op_target = getattr(torch.ops.aten, upgrader_name).default + # for example, the operator instance of "aten::div" is torch.op.aten.div.default. We need to append the + # "default" at the end. + op_name, overload_name = (op_name, "default") if "." not in op_name else tuple(op_name.split(".")[:2]) + new_op_target = getattr(getattr(torch.ops.aten, op_name), overload_name) + # Note that the graph will have op names in the graph, but actually they are of old versions. + upgrader_passes.append( + GraphModuleOpUpgrader.UpgraderPass(old_target=new_op_target, new_target=old_op_target)) + + return upgrader_passes + + def upgrade(self, exported_program: ep.ExportedProgram) -> ep.ExportedProgram: + """Run each upgrader pass and then retrace to decompose it. Each upgrader pass replaces the old version of + operators with a custom operator. The custom operator contains a CompositeImplicitAutograd kernel (the + upgrading function itself). After retrace, this custom operator will be decomposed into the ops used in the + upgrader. 
After all passes are applied, the exported program will be upgraded to the target version.""" + if not self.upgrader_passes: + return exported_program + + args = [n.meta.get("val", None) for n in exported_program.graph.nodes if n.op == "placeholder"] + args_real_tensors = [torch.ones(tuple(arg.size()), dtype=arg.dtype) if isinstance(arg, FakeTensor) else arg for + arg in args] + assert exported_program.call_spec.in_spec is not None + args, kwargs = tree_unflatten(args_real_tensors, exported_program.call_spec.in_spec) + assert kwargs == {} + + for _pass in self.upgrader_passes: + upgraded_program = exported_program._transform_do_not_use(_pass) + # NB: we have to retrace the graph_module instead of ep because of some failure. + exported_program = export(upgraded_program.module(), args, kwargs) + + return exported_program