diff --git a/ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..857ba56c276611fbb0701a06861d68bc76152646 --- /dev/null +++ b/ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:521cf52a07bcfd9e7ad9bb21d552e93734867eb516c719b6b1e27a510b6f3515 +size 16778396 diff --git a/ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..bc1ee79d245287817f4f2f4ddfdff1cb3cffb19f --- /dev/null +++ b/ckpts/universal/global_step120/zero/17.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b20df606e3efb85cfb30786194df403fd4110bbfd108ef919c5446d8bed158bc +size 16778411 diff --git a/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..5cf574697b307fe9fc863c8ed22699fd5a25e03e --- /dev/null +++ b/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b79740a9833f95550810cd432c64519f335049e04225c97eb6ecb50ab61d9969 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__init__.py b/venv/lib/python3.10/site-packages/torch/onnx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3af0984d4de0baa84472e1e87bbae33a45f36d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/__init__.py @@ -0,0 +1,177 @@ +from torch import _C +from torch._C import _onnx as _C_onnx +from torch._C._onnx import ( + _CAFFE2_ATEN_FALLBACK, + OperatorExportTypes, + TensorProtoDataType, + TrainingMode, +) + +from . import ( # usort:skip. Keep the order instead of sorting lexicographically + _deprecation, + errors, + symbolic_caffe2, + symbolic_helper, + symbolic_opset7, + symbolic_opset8, + symbolic_opset9, + symbolic_opset10, + symbolic_opset11, + symbolic_opset12, + symbolic_opset13, + symbolic_opset14, + symbolic_opset15, + symbolic_opset16, + symbolic_opset17, + symbolic_opset18, + utils, +) + +# TODO(After 1.13 release): Remove the deprecated SymbolicContext +from ._exporter_states import ExportTypes, SymbolicContext +from ._type_utils import JitScalarType +from .errors import CheckerError # Backwards compatibility +from .utils import ( + _optimize_graph, + _run_symbolic_function, + _run_symbolic_method, + export, + export_to_pretty_string, + is_in_onnx_export, + register_custom_op_symbolic, + select_model_mode_for_export, + unregister_custom_op_symbolic, +) + +from ._internal.exporter import ( # usort:skip. 
needs to be last to avoid circular import + DiagnosticOptions, + ExportOptions, + ONNXProgram, + ONNXProgramSerializer, + ONNXRuntimeOptions, + InvalidExportOptionsError, + OnnxExporterError, + OnnxRegistry, + dynamo_export, + enable_fake_mode, +) + +from ._internal.onnxruntime import ( + is_onnxrt_backend_supported, + OrtBackend as _OrtBackend, + OrtBackendOptions as _OrtBackendOptions, + OrtExecutionProvider as _OrtExecutionProvider, +) + +__all__ = [ + # Modules + "symbolic_helper", + "utils", + "errors", + # All opsets + "symbolic_caffe2", + "symbolic_opset7", + "symbolic_opset8", + "symbolic_opset9", + "symbolic_opset10", + "symbolic_opset11", + "symbolic_opset12", + "symbolic_opset13", + "symbolic_opset14", + "symbolic_opset15", + "symbolic_opset16", + "symbolic_opset17", + "symbolic_opset18", + # Enums + "ExportTypes", + "OperatorExportTypes", + "TrainingMode", + "TensorProtoDataType", + "JitScalarType", + # Public functions + "export", + "export_to_pretty_string", + "is_in_onnx_export", + "select_model_mode_for_export", + "register_custom_op_symbolic", + "unregister_custom_op_symbolic", + "disable_log", + "enable_log", + # Errors + "CheckerError", # Backwards compatibility + # Dynamo Exporter + "DiagnosticOptions", + "ExportOptions", + "ONNXProgram", + "ONNXProgramSerializer", + "ONNXRuntimeOptions", + "InvalidExportOptionsError", + "OnnxExporterError", + "OnnxRegistry", + "dynamo_export", + "enable_fake_mode", + # DORT / torch.compile + "is_onnxrt_backend_supported", +] + +# Set namespace for exposed private names +ExportTypes.__module__ = "torch.onnx" +JitScalarType.__module__ = "torch.onnx" +ExportOptions.__module__ = "torch.onnx" +ONNXProgram.__module__ = "torch.onnx" +ONNXProgramSerializer.__module__ = "torch.onnx" +ONNXRuntimeOptions.__module__ = "torch.onnx" +dynamo_export.__module__ = "torch.onnx" +InvalidExportOptionsError.__module__ = "torch.onnx" +OnnxExporterError.__module__ = "torch.onnx" +enable_fake_mode.__module__ = "torch.onnx" +OnnxRegistry.__module__ = "torch.onnx" +DiagnosticOptions.__module__ = "torch.onnx" +is_onnxrt_backend_supported.__module__ = "torch.onnx" +_OrtExecutionProvider.__module__ = "torch.onnx" +_OrtBackendOptions.__module__ = "torch.onnx" +_OrtBackend.__module__ = "torch.onnx" + +producer_name = "pytorch" +producer_version = _C_onnx.PRODUCER_VERSION + + +@_deprecation.deprecated( + since="1.12.0", removed_in="2.0", instructions="use `torch.onnx.export` instead" +) +def _export(*args, **kwargs): + return utils._export(*args, **kwargs) + + +# TODO(justinchuby): Deprecate these logging functions in favor of the new diagnostic module. + +# Returns True iff ONNX logging is turned on. +is_onnx_log_enabled = _C._jit_is_onnx_log_enabled + + +def enable_log() -> None: + r"""Enables ONNX logging.""" + _C._jit_set_onnx_log_enabled(True) + + +def disable_log() -> None: + r"""Disables ONNX logging.""" + _C._jit_set_onnx_log_enabled(False) + + +"""Sets output stream for ONNX logging. + +Args: + stream_name (str, default "stdout"): Only 'stdout' and 'stderr' are supported + as ``stream_name``. +""" +set_log_stream = _C._jit_set_onnx_log_output_stream + + +"""A simple logging facility for ONNX exporter. + +Args: + args: Arguments are converted to string, concatenated together with a newline + character appended to the end, and flushed to output stream. 
+""" +log = _C._jit_onnx_log diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89834def13ced4e577ca20132b76fde8cbab493a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65a1be325b50d7b763909d9eb9d6b857b0786b28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_deprecation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca334ef7e7fece5411c70501fb8d653d5fc21fcf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_deprecation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1eadda83603a7b3d562f702d93f907a56dd67bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7da96332f3962bda6cea79b663129858bec11c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_globals.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_globals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..227aaf35b3b5b417f28065f052c30084fba88997 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_globals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f497fd208daef48c3bbe768768a97fd03752457e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08abf71773d2fec0aa24a7f0ed39f31c3ed6447c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..392fc876181f8575f30ce88b1e56369ed9755a4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_caffe2.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_caffe2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8a3f4f9fbfcb8df920976bdc233296f437036ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_caffe2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b02a6d3c41a6393eea99f71c2df0461e2ab79eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset10.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset10.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..370af42de492061cc1c775fa817820310b7ebde1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset10.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d62c7b16c8a13c37f746b7bd4e5486b97b5bdf73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9af6e4f48a82f66676ab0319e2b96053f5324d13 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset13.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset13.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dad69170a6083f5d4e0cb963b47b8b973253206 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset13.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset14.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset14.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4368b1c7114ca553d2ce5585bf646c355ea1473 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset14.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset15.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset15.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fb50512d23e0177ea4c119da4a02485107d06cc Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset15.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..249b37fb3690f85dd834eb3e56acaf8f25517b2c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset17.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset17.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b5cdcb22f54e19a2cc9eca8d6b6776f127ca5cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset17.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..506ed4c47a56b4c4b04d9131b614340edc19590f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28df179619398bb927ebd94e6089748d9a404a5b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7384dd3cb61d357d66b35abdc44ba038521696d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4077f4d7825cc2d965e612a24761ae3b9412ef25 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8f28195666639eb5554b933917075d3aae65971 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/verification.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/verification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3c48d9ddb2f179a6e571bf44500135d384600ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/onnx/__pycache__/verification.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/onnx/_constants.py b/venv/lib/python3.10/site-packages/torch/onnx/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..bcdcf4fa6112ab75d1251304ef9928edc212c2c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_constants.py @@ -0,0 +1,25 @@ +"""Constant values used in ONNX.""" + +ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO" + +ONNX_BASE_OPSET = 9 +ONNX_MIN_OPSET = 7 +ONNX_MAX_OPSET = 19 +ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET = 17 +# ONNX_DEFAULT_OPSET generated by tools/onnx/update_default_opset_version.py +ONNX_DEFAULT_OPSET = 17 +ONNX_CONSTANT_FOLDING_MIN_OPSET = 9 + +PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues" + +INT64_MAX = 9223372036854775807 +INT32_MAX = 2147483647 +INT16_MAX = 32767 +INT8_MAX = 127 +UINT8_MAX = 255 + +INT64_MIN = -9223372036854775808 +INT32_MIN = -2147483648 +INT16_MIN = -32768 +INT8_MIN = -128 +UINT8_MIN = 0 diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_experimental.py b/venv/lib/python3.10/site-packages/torch/onnx/_experimental.py new file mode 100644 index 0000000000000000000000000000000000000000..158e0cdbda9f4312195b9b98835aa1f3b1466902 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_experimental.py @@ -0,0 +1,28 @@ +"""Experimental classes and functions used by ONNX export.""" + +import dataclasses +from typing import Mapping, Optional, Sequence, Set, Type, Union + +import torch +import torch._C._onnx as _C_onnx + + +@dataclasses.dataclass +class ExportOptions: + """Arguments used by :func:`torch.onnx.export`. + + TODO: Adopt this in `torch.onnx.export` api to replace keyword arguments. + """ + + export_params: bool = True + verbose: bool = False + training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL + input_names: Optional[Sequence[str]] = None + output_names: Optional[Sequence[str]] = None + operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX + opset_version: Optional[int] = None + do_constant_folding: bool = True + dynamic_axes: Optional[Mapping[str, Union[Mapping[int, str], Sequence[int]]]] = None + keep_initializers_as_inputs: Optional[bool] = None + custom_opsets: Optional[Mapping[str, int]] = None + export_modules_as_functions: Union[bool, Set[Type[torch.nn.Module]]] = False diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_exporter_states.py b/venv/lib/python3.10/site-packages/torch/onnx/_exporter_states.py new file mode 100644 index 0000000000000000000000000000000000000000..dddc92befe6e123e1ec9c12f2b0e4cb7e5eccfa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_exporter_states.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from typing import Dict + +from torch import _C + + +class ExportTypes: + r"""Specifies how the ONNX model is stored.""" + + PROTOBUF_FILE = "Saves model in the specified protobuf file." + ZIP_ARCHIVE = "Saves model in the specified ZIP file (uncompressed)." + COMPRESSED_ZIP_ARCHIVE = "Saves model in the specified ZIP file (compressed)." + DIRECTORY = "Saves model in the specified folder." + + +class SymbolicContext: + """Extra context for symbolic functions. + + Args: + params_dict (Dict[str, _C.IValue]): Mapping from graph initializer name to IValue. + env (Dict[_C.Value, _C.Value]): Mapping from Torch domain graph Value to ONNX domain graph Value. + cur_node (_C.Node): Current node being converted to ONNX domain. + onnx_block (_C.Block): Current ONNX block that converted nodes are being appended to. 
+ """ + + def __init__( + self, + params_dict: Dict[str, _C.IValue], + env: dict, + cur_node: _C.Node, + onnx_block: _C.Block, + ): + self.params_dict: Dict[str, _C.IValue] = params_dict + self.env: Dict[_C.Value, _C.Value] = env + # Current node that is being converted. + self.cur_node: _C.Node = cur_node + # Current onnx block that converted nodes are being appended to. + self.onnx_block: _C.Block = onnx_block diff --git a/venv/lib/python3.10/site-packages/torch/onnx/_globals.py b/venv/lib/python3.10/site-packages/torch/onnx/_globals.py new file mode 100644 index 0000000000000000000000000000000000000000..f827d12be7fbf4b1dbe29c5111f50b22b0a9839b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/_globals.py @@ -0,0 +1,85 @@ +"""Globals used internally by the ONNX exporter. + +Do not use this module outside of `torch.onnx` and its tests. + +Be very judicious when adding any new global variables. Do not create new global +variables unless they are absolutely necessary. +""" +import torch._C._onnx as _C_onnx + +# This module should only depend on _constants and nothing else in torch.onnx to keep +# dependency direction clean. +from torch.onnx import _constants + + +class _InternalGlobals: + """Globals used internally by ONNX exporter. + + NOTE: Be very judicious when adding any new variables. Do not create new + global variables unless they are absolutely necessary. + """ + + def __init__(self): + self._export_onnx_opset_version = _constants.ONNX_DEFAULT_OPSET + self._training_mode: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL + self._in_onnx_export: bool = False + # Whether the user's model is training during export + self.export_training: bool = False + self.operator_export_type: _C_onnx.OperatorExportTypes = ( + _C_onnx.OperatorExportTypes.ONNX + ) + self.onnx_shape_inference: bool = True + self._autograd_inlining: bool = True + + @property + def training_mode(self): + """The training mode for the exporter.""" + return self._training_mode + + @training_mode.setter + def training_mode(self, training_mode: _C_onnx.TrainingMode): + if not isinstance(training_mode, _C_onnx.TrainingMode): + raise TypeError( + "training_mode must be of type 'torch.onnx.TrainingMode'. This is " + "likely a bug in torch.onnx." 
+ ) + self._training_mode = training_mode + + @property + def export_onnx_opset_version(self) -> int: + """Opset version used during export.""" + return self._export_onnx_opset_version + + @export_onnx_opset_version.setter + def export_onnx_opset_version(self, value: int): + supported_versions = range( + _constants.ONNX_MIN_OPSET, _constants.ONNX_MAX_OPSET + 1 + ) + if value not in supported_versions: + raise ValueError(f"Unsupported ONNX opset version: {value}") + self._export_onnx_opset_version = value + + @property + def in_onnx_export(self) -> bool: + """Whether it is in the middle of ONNX export.""" + return self._in_onnx_export + + @in_onnx_export.setter + def in_onnx_export(self, value: bool): + if type(value) is not bool: + raise TypeError("in_onnx_export must be a boolean") + self._in_onnx_export = value + + @property + def autograd_inlining(self) -> bool: + """Whether Autograd must be inlined.""" + return self._autograd_inlining + + @autograd_inlining.setter + def autograd_inlining(self, value: bool): + if type(value) is not bool: + raise TypeError("autograd_inlining must be a boolean") + self._autograd_inlining = value + + +GLOBALS = _InternalGlobals() diff --git a/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py new file mode 100644 index 0000000000000000000000000000000000000000..19197c0bdc788804e5c58833ed2e3949131dbe3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py @@ -0,0 +1,1233 @@ +from __future__ import annotations + +import functools +import sys +import warnings +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch._C._onnx as _C_onnx +import torch.onnx +from torch import _C + +# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics +from torch.onnx import ( + _constants, + _type_utils, + errors, + symbolic_helper, + symbolic_opset9 as opset9, +) +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils, registration + +# EDITING THIS FILE? READ THIS FIRST! 
+# see Note [Edit Symbolic Files] in README.md + +# This file exports ONNX ops for opset 10 +# Opset 10 is supported by ONNX release 1.5.0 +# release on 04/24/19 + + +__all__ = [ + "dequantize", + "div", + "embedding_bag", + "fake_quantize_per_tensor_affine", + "flip", + "fmod", + "isfinite", + "isinf", + "nan_to_num", + "quantize_per_tensor", + "quantized_add_relu", + "quantized_add", + "quantized_cat", + "quantized_conv1d_relu", + "quantized_conv2d_relu", + "quantized_conv3d_relu", + "quantized_conv1d", + "quantized_conv2d", + "quantized_conv3d", + "quantized_conv_transpose1d", + "quantized_conv_transpose2d", + "quantized_conv_transpose3d", + "quantized_group_norm", + "quantized_hardswish", + "quantized_instance_norm", + "quantized_layer_norm", + "quantized_leaky_relu", + "quantized_linear", + "quantized_linear_relu", + "quantized_mul", + "quantized_sigmoid", + "slice", + "sort", + "topk", +] + + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=10) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +@_onnx_symbolic("aten::div") +@_beartype.beartype +def div(g: jit_utils.GraphContext, self, other, *args): + if len(args) == 0: + return opset9.true_divide(g, self, other) + else: + return _div_rounding_mode(g, self, other, *args) + + +@symbolic_helper.parse_args("v", "v", "s") +@_beartype.beartype +def _div_rounding_mode(g: jit_utils.GraphContext, self, other, rounding_mode): + if rounding_mode == "floor": + return _floor_divide(g, self, other) + else: + return opset9._div_rounding_mode(g, self, other, rounding_mode) + + +@_onnx_symbolic("aten::_floor_divide") +@_beartype.beartype +def _floor_divide(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other): + out = opset9.true_divide(g, self, other) + return g.op("Floor", out) + else: + # Integer division does trunction rounding + div = g.op("Div", self, other) + # Division is negative if: self < 0 != other < 0 + zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)) + negative = g.op("Xor", g.op("Less", self, zero), g.op("Less", other, zero)) + + # For negative numbers with self % other != 0, subtract 1 to round down instead of up + mod = g.op("Mod", self, other, fmod_i=0) + fixup_mask = g.op("And", negative, g.op("Not", g.op("Equal", mod, zero))) + + one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + fixup = g.op("Sub", div, one) + return g.op("Where", fixup_mask, fixup, div) + + +@_onnx_symbolic("aten::sort") +@symbolic_helper.parse_args("v", "i", "i", "none") +@_beartype.beartype +def sort(g: jit_utils.GraphContext, self, dim, decending, out=None): + return symbolic_helper._sort_helper(g, self, dim, decending=decending, out=out) + + +@_onnx_symbolic("aten::topk") +@symbolic_helper.parse_args("v", "v", "i", "i", "i", "none") +@_beartype.beartype +def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None): + return symbolic_helper._topk_helper( + g, self, k, dim, largest=largest, sorted=sorted, out=out + ) + + +def _aten_max_pool_onnx( + g: jit_utils.GraphContext, + self: _C.Value, + kernel_shape: Sequence[int], + strides: Sequence[int], + pads: Sequence[int], + dilations: Sequence[int], + ceil_mode: bool, + unbatched_rank: int, +) -> _C.Value: + self_rank = g.op("Size", g.op("Shape", self)) + if self_rank == unbatched_rank: # C,H,W -> N,C,H,W and N=1 + self = 
g.op( + "Unsqueeze", + self, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + + pool_result, _ = g.op( + "MaxPool", + self, + outputs=2, + ceil_mode_i=ceil_mode, + dilations_i=dilations, + kernel_shape_i=kernel_shape, + pads_i=pads, + strides_i=strides, + ) + + if self_rank == unbatched_rank: + pool_result = g.op( + "Squeeze", + pool_result, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + + return pool_result + + +# For MaxPool +def _adjust_attributes_of_max_pool( + expand_size: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + padding: Union[Sequence[int], int], + dilation: Union[Sequence[int], int], +) -> Tuple[Sequence[int], Sequence[int], Sequence[int], Sequence[int]]: + """Adjust attributes of avg_pool to match ONNX specification.""" + + if isinstance(dilation, int): + dilation = [dilation] * expand_size + + if isinstance(kernel_size, int): + kernel_shape = [kernel_size] * expand_size + else: + kernel_shape = kernel_size # type: ignore[assignment] + + if isinstance(padding, int): + pads = [padding] * expand_size * 2 # type: ignore[operator, assignment] + elif len(padding) == 1: + pads = padding * expand_size * 2 # type: ignore[operator, assignment] + elif len(padding) == 2: + # 2D padding + pads = padding * 2 # type: ignore[operator, assignment] + elif len(padding) == 3: + # 3D padding + pads = padding * 2 # type: ignore[operator, assignment] + else: + # When padding is already done for all dimensions, + # we don't need to double it + # eg: (1, 1, 1, 1, 1, 1) + pads = padding # type: ignore[assignment] + + if isinstance(stride, int): + strides = [stride] * expand_size + elif not stride: + strides = kernel_shape + else: + strides = stride # type: ignore[assignment] + + return (kernel_shape, strides, pads, dilation) + + +def _aten_max_pool_with_indices_onnx( + g: jit_utils.GraphContext, + self: _C.Value, + kernel_shape: Sequence[int], + strides: Sequence[int], + pads: Sequence[int], + dilations: Sequence[int], + ceil_mode: bool, + unbatched_rank: int, + n_dims_one: Sequence[int], + n_dims_zero: Sequence[int], + n_dims_axes: Sequence[int], +) -> Tuple[_C.Value, Sequence[int]]: + self_rank = g.op("Size", g.op("Shape", self)) + if self_rank == unbatched_rank: # C,H,W -> N,C,H,W and N=1 + self = g.op( + "Unsqueeze", + self, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + + pool_result, indices = g.op( + "MaxPool", + self, + outputs=2, + ceil_mode_i=ceil_mode, + dilations_i=dilations, + kernel_shape_i=kernel_shape, + pads_i=pads, + strides_i=strides, + ) + _, flatten_indices = g.op( + "MaxPool", + self, + outputs=2, + dilations_i=dilations, + kernel_shape_i=n_dims_one, + strides_i=n_dims_one, + ) + + ends = g.op("Constant", value_t=torch.tensor(n_dims_one)) + starts = g.op("Constant", value_t=torch.tensor(n_dims_zero)) + axes = g.op("Constant", value_t=torch.tensor(n_dims_axes)) + + delta = g.op("Slice", flatten_indices, starts, ends, axes) + indices = g.op("Sub", indices, delta) + + if self_rank == unbatched_rank: + pool_result = g.op( + "Squeeze", pool_result, value_t=torch.tensor([0], dtype=torch.int64) + ) + indices = g.op("Squeeze", indices, value_t=torch.tensor([0], dtype=torch.int64)) + + return (pool_result, indices) + + +@_onnx_symbolic( + "aten::max_pool1d", + decorate=[_apply_params("max_pool1d", 1, return_indices=False)], +) +@_onnx_symbolic( + "aten::max_pool2d", + decorate=[_apply_params("max_pool2d", 2, return_indices=False)], +) +@_onnx_symbolic( + "aten::max_pool3d", + 
decorate=[_apply_params("max_pool3d", 3, return_indices=False)], +) +@_onnx_symbolic( + "aten::max_pool1d_with_indices", + decorate=[ + _apply_params( + "max_pool1d_with_indices", + 1, + return_indices=True, + ) + ], +) +@_onnx_symbolic( + "aten::max_pool2d_with_indices", + decorate=[ + _apply_params( + "max_pool2d_with_indices", + 2, + return_indices=True, + ) + ], +) +@_onnx_symbolic( + "aten::max_pool3d_with_indices", + decorate=[ + _apply_params( + "max_pool3d_with_indices", + 3, + return_indices=True, + ) + ], +) +@_beartype.beartype +def _max_pool(name: str, expand_size: int, return_indices: bool): + @symbolic_helper.quantized_args(True, False, False, False, False, False) + @symbolic_helper.parse_args("v", "is", "is", "is", "is", "i") + def symbolic_fn( + g: jit_utils.GraphContext, + input: _C.Value, + kernel_size: Sequence[int], + stride: Sequence[int], + padding: Union[int, Sequence[int]], + dilation: Sequence[int], + ceil_mode: bool, + ): + kernel_shape, strides, pads, dilations = _adjust_attributes_of_max_pool( + expand_size, kernel_size, stride, padding, dilation + ) + + if return_indices: + return _aten_max_pool_with_indices_onnx( + g, + input, + kernel_shape, + strides, + pads, + dilations, + ceil_mode, + expand_size + 1, + ([1] * expand_size), + ([0] * expand_size), + ([2 + i for i in range(expand_size)]), + ) + else: + return _aten_max_pool_onnx( + g, + input, + kernel_shape, + strides, + pads, + dilations, + ceil_mode, + expand_size + 1, + ) + + return symbolic_fn + + +# For AvgPool +def _adjust_attributes_of_avg_pool( + expand_size: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + padding: Union[Sequence[int], int], +) -> Tuple[Sequence[int], Sequence[int], Sequence[int]]: + """Adjust attributes of avg_pool to match ONNX specification.""" + + if isinstance(kernel_size, int): + kernel_shape = [kernel_size] * expand_size + else: + kernel_shape = kernel_size # type: ignore[assignment] + + if isinstance(padding, int): + pads = [padding] * expand_size * 2 + elif len(padding) == 1: + pads = padding * expand_size * 2 # type: ignore[operator, assignment] + elif len(padding) == 2: + pads = padding * expand_size # type: ignore[operator, assignment] + else: + pads = padding * 2 # type: ignore[operator, assignment] + + if isinstance(stride, int): + strides = [stride] * expand_size + elif not stride: + strides = kernel_shape + else: + strides = stride # type: ignore[assignment] + + return (kernel_shape, strides, pads) + + +@_onnx_symbolic( + "aten::avg_pool1d", + decorate=[_apply_params("avg_pool1d", 1)], +) +@_onnx_symbolic( + "aten::avg_pool2d", + decorate=[_apply_params("avg_pool2d", 2)], +) +@_onnx_symbolic( + "aten::avg_pool3d", + decorate=[_apply_params("avg_pool3d", 3)], +) +@_beartype.beartype +def _avg_pool(name, expand_size): + @symbolic_helper.quantized_args(True, False, False, False, False, False, False) + @symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none") + @_beartype.beartype + def symbolic_fn( + g, + input: _C.Value, + kernel_size: Sequence[int], + stride: Sequence[int], + padding: Union[int, Sequence[int]], + ceil_mode: int, + count_include_pad: int, + divisor_override=None, + ): + kernel_shape, strides, pads = _adjust_attributes_of_avg_pool( + expand_size, kernel_size, stride, padding + ) + + result = g.op( + "AveragePool", + input, + ceil_mode_i=ceil_mode, + count_include_pad_i=count_include_pad, + kernel_shape_i=kernel_shape, + pads_i=pads, + strides_i=strides, + ) + + return result + + return symbolic_fn + + 
+@_onnx_symbolic( + "aten::upsample_nearest1d", + decorate=[_apply_params("upsample_nearest1d", 3, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest2d", + decorate=[_apply_params("upsample_nearest2d", 4, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest3d", + decorate=[_apply_params("upsample_nearest3d", 5, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_linear1d", + decorate=[_apply_params("upsample_linear1d", 3, "linear")], +) +@_onnx_symbolic( + "aten::upsample_bilinear2d", + decorate=[_apply_params("upsample_bilinear2d", 4, "linear")], +) +@_onnx_symbolic( + "aten::upsample_trilinear3d", + decorate=[_apply_params("upsample_trilinear3d", 5, "linear")], +) +@_beartype.beartype +def _interpolate(name, dim, interpolate_mode): + @symbolic_helper.quantized_args(True, False, False) + @_beartype.beartype + def symbolic_fn(g, input, output_size, *args): + scales, align_corners = symbolic_helper._get_interpolate_attributes( + g, interpolate_mode, args + ) + symbolic_helper._interpolate_warning(interpolate_mode) + align_corners = symbolic_helper._maybe_get_scalar(align_corners) + if align_corners: + return symbolic_helper._unimplemented(name, "align_corners == True", input) + if scales is None: + scales = symbolic_helper._interpolate_size_to_scales( + g, input, output_size, dim + ) + return g.op("Resize", input, scales, mode_s=interpolate_mode) + + return symbolic_fn + + +@_onnx_symbolic("aten::__interpolate") +@_beartype.beartype +def __interpolate( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, + antialias, +): + scales, mode = symbolic_helper._interpolate_get_scales_and_mode( + g, input, size, scale_factor, mode, align_corners + ) + return g.op("Resize", input, scales, mode_s=mode) + + +@_beartype.beartype +def _slice( + g: jit_utils.GraphContext, + input: torch._C.Value, + axes: Union[List, torch.Tensor, torch._C.Value], + starts: Union[List, torch.Tensor, torch._C.Value], + ends: Union[List, torch.Tensor, torch._C.Value], + steps: Optional[Union[List, torch.Tensor, torch._C.Value]] = None, +): + def is_none_value(value): + if value is None: + return True + return ( + isinstance(value, torch._C.Value) + and value.node().kind() == "prim::Constant" + and isinstance(value.type(), _C.NoneType) + ) + + def to_slice_input(list_or_value, default_value=None): + # Convert input param into a 1D torch.Value. 
+ if is_none_value(list_or_value) and default_value is not None: + list_or_value = [default_value] + + if isinstance(list_or_value, (list, torch.Tensor)): + return g.op("Constant", value_t=torch.tensor(list_or_value)) + + rank = symbolic_helper._get_tensor_rank(list_or_value) + if rank == 0: + return symbolic_helper._unsqueeze_helper(g, list_or_value, [0]) + if rank == 1: + return list_or_value + raise errors.SymbolicValueError( + f"Rank must be 0 or 1, not {rank}", list_or_value + ) + + def get_const_value(list_or_value): + if isinstance(list_or_value, (list, torch.Tensor)): + if len(list_or_value) == 1: + return list_or_value[0] + return None + return symbolic_helper._maybe_get_const(list_or_value, "i") + + # Check if slice is a no-op + if ( + get_const_value(starts) == 0 + and get_const_value(ends) == _constants.INT64_MAX + and (steps is None or get_const_value(steps) == 1) + ): + return input + + axes = to_slice_input(axes) + starts = to_slice_input(starts, default_value=0) + ends = to_slice_input(ends, default_value=_constants.INT64_MAX) + if steps is None: + return g.op("Slice", input, starts, ends, axes) + steps = to_slice_input(steps, default_value=1) + return g.op("Slice", input, starts, ends, axes, steps) + + +@_onnx_symbolic("aten::slice") +@_beartype.beartype +def slice(g: jit_utils.GraphContext, self, *args): + if len(args) == 4: + # aten::slice(Tensor self, int dim, int? start=None, int? end=None, int step=1) -> Tensor + dims, start, end, step = args + elif len(args) == 3: + # aten::slice(t[] l, int? start=None, int? end=None, int step=1) -> t[] + start, end, step = args + dims = [0] + else: + raise errors.SymbolicValueError("Unknown aten::slice signature", self) + + return symbolic_helper._slice_helper( + g, + self, + axes=dims, + starts=start, + ends=end, + steps=step, + ) + + +@_onnx_symbolic("aten::flip") +@symbolic_helper.parse_args("v", "is") +@_beartype.beartype +def flip(g: jit_utils.GraphContext, input, dims): + return symbolic_helper._slice_helper( + g, + input, + axes=dims, + starts=[-1] * len(dims), + ends=[-_constants.INT64_MAX] * len(dims), + steps=[-1] * len(dims), + ) + + +@_onnx_symbolic("aten::fmod") +@_beartype.beartype +def fmod(g: jit_utils.GraphContext, input, other): + return g.op("Mod", input, other, fmod_i=1) + + +@_onnx_symbolic("aten::embedding_bag") +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i") +@_beartype.beartype +def embedding_bag( + g: jit_utils.GraphContext, + embedding_matrix, + indices, + offsets, + scale_grad_by_freq, + mode, + sparse, + per_sample_weights, + include_last_offset, + padding_idx, +): + if scale_grad_by_freq and GLOBALS.export_training: + return symbolic_helper._onnx_unsupported( + "embedding_bag with scale_grad_by_freq for training mode" + ) + if padding_idx is not None and padding_idx >= 0: + raise RuntimeError("embedding_bag with padding_idx") + + warnings.warn( + "Export of embedding_bag with dynamic input/offsets shape is not supported in opset 10. 
" + "Please use opset 11 or higher to export model for dynamic input shape.'" + ) + offsets_dim_0 = symbolic_helper._get_tensor_dim_size(offsets, 0) + if offsets_dim_0 is not None: + if include_last_offset: + offset_len = offsets_dim_0 - 1 + offsets_extended = offsets + else: + offset_len = offsets_dim_0 + offsets_extended = [ + offsets, + g.op("Constant", value_t=torch.tensor([sys.maxsize])), + ] + offsets_extended = g.op("Concat", *offsets_extended, axis_i=0) + list_ = [] + for i in range(offset_len): + start_ = symbolic_helper._unsqueeze_helper( + g, + opset9.select(g, offsets_extended, torch.tensor(0), torch.tensor(i)), + [0], + ) + end_ = symbolic_helper._unsqueeze_helper( + g, + opset9.select( + g, offsets_extended, torch.tensor(0), torch.tensor(i + 1) + ), + [0], + ) + axes_ = g.op("Constant", value_t=torch.tensor([0])) + indices_row = g.op("Slice", indices, start_, end_, axes_) + + embeddings = g.op("Gather", embedding_matrix, indices_row) + if not symbolic_helper._is_none(per_sample_weights): + per_sample_weights_row = g.op( + "Slice", per_sample_weights, start_, end_, axes_ + ) + per_sample_weights_row = symbolic_helper._unsqueeze_helper( + g, per_sample_weights_row, [1] + ) + embeddings = g.op("Mul", embeddings, per_sample_weights_row) + if mode == 0: + embeddings = symbolic_helper._reducesum_helper( + g, embeddings, axes_i=[0], keepdims_i=0 + ) + elif mode == 1: + embeddings = g.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0) + else: + embeddings = g.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0) + + embeddings = symbolic_helper._unsqueeze_helper(g, embeddings, [0]) + list_.append(embeddings) + + output = g.op("Concat", *list_, axis_i=0) + # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices. + # But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag. + return output, None, None, None + else: + return symbolic_helper._onnx_unsupported( + "embedding_bag with unknown shape of offsets for opset 10 is not supported. " + "please use opset 11 or higher." + ) + + +@_onnx_symbolic("aten::fake_quantize_per_tensor_affine") +@symbolic_helper.parse_args("v", "v", "v", "i", "i") +@_beartype.beartype +def fake_quantize_per_tensor_affine( + g: jit_utils.GraphContext, + inputs, + scale, + zero_point, + quant_min=-128, + quant_max=127, +): + # NOTE: (0, 127) is a special case. PyTorch restricts activations to be in the range (0, 127). + # https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422 + if (quant_min, quant_max) == (0, 127): + symbolic_helper._onnx_opset_unsupported_detailed( + "fake_quantize_per_tensor_affine", + 10, + 13, + "Quantize range (0, 127) not supported, requires opset 13 Clip", + inputs, + ) + if (quant_min, quant_max) not in [(0, 255), (-128, 127)]: + raise errors.SymbolicValueError( + f"For (quant_min, quant_max), ONNX allows only (0, 255) and (-128, 127). 
" + f"Got ({quant_min}, {quant_max})", + inputs, + ) + scale = symbolic_helper._maybe_get_scalar(scale) + if scale is None: + symbolic_helper._onnx_opset_unsupported_detailed( + "fake_quantize_per_tensor_affine", + 10, + 13, + "Non-constant scale not supported", + inputs, + ) + scale = scale.float().data # Avoid exporter generating double type + if quant_min == 0: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8) + else: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8) + return g.op( + "DequantizeLinear", + g.op("QuantizeLinear", inputs, scale, zero_point), + scale, + zero_point, + ) + + +@_onnx_symbolic("aten::isinf") +@_beartype.beartype +def isinf(g: jit_utils.GraphContext, input): + return g.op("IsInf", g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.DOUBLE)) + + +@_onnx_symbolic("aten::isfinite") +@_beartype.beartype +def isfinite(g: jit_utils.GraphContext, input): + inf_node = isinf(g, input) + nan_node = opset9.isnan(g, input) + return opset9.__not_(g, opset9.__or_(g, inf_node, nan_node)) + + +@_onnx_symbolic("aten::quantize_per_tensor") +@_beartype.beartype +def quantize_per_tensor(g: jit_utils.GraphContext, input, scale, zero_point, dtype): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + # TODO(justinchuby): Extract all the cast ops into a helper function. + zero_point = g.op( + "Cast", zero_point, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return symbolic_helper.quantize_helper(g, input, scale, zero_point) + + +@_onnx_symbolic("aten::dequantize") +@_beartype.beartype +def dequantize(g: jit_utils.GraphContext, input): + return symbolic_helper.dequantize_helper(g, input)[0] + + +@_onnx_symbolic("aten::nan_to_num") +@symbolic_helper.parse_args("v", "f", "f", "f") +@_beartype.beartype +def nan_to_num(g: jit_utils.GraphContext, input, nan, posinf, neginf): + # Cannot create a int type tensor with inf/nan values, so we simply + # return the original tensor + if not symbolic_helper._is_fp(input): + return input + input_dtype = _type_utils.JitScalarType.from_value(input).dtype() + if nan is None: + nan = 0.0 + nan_cond = opset9.isnan(g, input) + nan_result = g.op( + "Where", + nan_cond, + g.op("Constant", value_t=torch.tensor([nan], dtype=input_dtype)), + input, + ) + + # For None values of posinf, neginf we use the greatest/lowest finite + # value representable by input’s dtype. + finfo = torch.finfo(input_dtype) + if posinf is None: + posinf = finfo.max + posinf_cond = opset9.logical_and( + g, + isinf(g, nan_result), + opset9.gt(g, nan_result, g.op("Constant", value_t=torch.LongTensor([0]))), + ) + nan_posinf_result = g.op( + "Where", + posinf_cond, + g.op("Constant", value_t=torch.tensor([posinf], dtype=input_dtype)), + nan_result, + ) + + if neginf is None: + neginf = finfo.min + neginf_cond = opset9.logical_and( + g, + isinf(g, nan_posinf_result), + opset9.lt( + g, nan_posinf_result, g.op("Constant", value_t=torch.LongTensor([0])) + ), + ) + return g.op( + "Where", + neginf_cond, + g.op("Constant", value_t=torch.tensor([neginf], dtype=input_dtype)), + nan_posinf_result, + ) + + +# Quantized symbolics --------------------------------------------------------- +# https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter#quantized-model-export +# Support starts from opset 10 because `DequantizeLinear` and `QuantizeLinear` were +# introduced in opset version 10. 
+@_onnx_symbolic("quantized::linear") +@_beartype.beartype +def quantized_linear( + g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.linear(g, input, weight, bias) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::linear_relu") +@_beartype.beartype +def quantized_linear_relu( + g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.linear(g, input, weight, bias) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::add") +@_beartype.beartype +def quantized_add(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + y, _, _, _ = symbolic_helper.dequantize_helper(g, y) + + output = opset9.add(g, x, y) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::add_relu") +@_beartype.beartype +def quantized_add_relu(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + y, _, _, _ = symbolic_helper.dequantize_helper(g, y) + + output = opset9.add(g, x, y) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::mul") +@_beartype.beartype +def quantized_mul(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + y, _, _, _ = symbolic_helper.dequantize_helper(g, y) + + output = opset9.mul(g, x, y) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::hardswish") +@_beartype.beartype +def quantized_hardswish(g: jit_utils.GraphContext, x, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.hardswish(g, x) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::sigmoid") +@_beartype.beartype +def quantized_sigmoid(g: jit_utils.GraphContext, x, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.sigmoid(g, x) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::leaky_relu") +@_beartype.beartype +def quantized_leaky_relu( + g: jit_utils.GraphContext, x, negative_slope, inplace, op_scale, op_zero_point +): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.leaky_relu(g, x, negative_slope, inplace) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::layer_norm") +@_beartype.beartype +def quantized_layer_norm( + g: jit_utils.GraphContext, + x, + normalized_shape, + weight, + bias, + eps, + op_scale, + 
op_zero_point, +): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.layer_norm(g, x, normalized_shape, weight, bias, eps, False) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::group_norm") +@_beartype.beartype +def quantized_group_norm( + g: jit_utils.GraphContext, + x, + num_groups, + weight, + bias, + eps, + op_scale, + op_zero_point, +): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.group_norm(g, x, num_groups, weight, bias, eps, False) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::instance_norm") +@symbolic_helper.parse_args("v", "v", "v", "f", "v", "v") +@_beartype.beartype +def quantized_instance_norm( + g: jit_utils.GraphContext, + q_input, + weight, + bias, + eps, + op_scale, + op_zero_point, +): + input, _, _, _ = symbolic_helper.dequantize_helper(g, q_input) + + output = opset9.instance_norm( + g, input, weight, bias, None, None, False, 0.0, eps, False + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv1d_relu") +@_beartype.beartype +def quantized_conv1d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv2d_relu") +@_beartype.beartype +def quantized_conv2d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv3d_relu") +@_beartype.beartype +def quantized_conv3d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv1d") +@_beartype.beartype +def quantized_conv1d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + 
op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv2d") +@_beartype.beartype +def quantized_conv2d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv3d") +@_beartype.beartype +def quantized_conv3d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose1d") +@_beartype.beartype +def quantized_conv_transpose1d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose2d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose2d") +@_beartype.beartype +def quantized_conv_transpose2d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose2d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose3d") +@_beartype.beartype +def quantized_conv_transpose3d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + 
stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose3d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::cat") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def quantized_cat( + g: jit_utils.GraphContext, + q_inputs: _C.Value, + dim: int, + op_scale: _C.Value, + op_zero_point: _C.Value, +) -> _C.Value: + unpacked_inputs = symbolic_helper._unpack_list(q_inputs) + dequantized = [ + symbolic_helper.dequantize_helper(g, input)[0] for input in unpacked_inputs + ] + concatenated = g.op("Concat", *dequantized, axis_i=dim) + return symbolic_helper.quantize_helper(g, concatenated, op_scale, op_zero_point) diff --git a/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py new file mode 100644 index 0000000000000000000000000000000000000000..130b02a889b04e75ad01577245e264cb755ed868 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset12.py @@ -0,0 +1,485 @@ +from __future__ import annotations + +import functools +import sys +from typing import Optional, Tuple + +import torch +from torch._C import _onnx as _C_onnx +from torch.onnx import ( + _type_utils, + errors, + symbolic_helper, + symbolic_opset9 as opset9, + utils, +) +from torch.onnx._internal import _beartype, jit_utils, registration + + +# EDITING THIS FILE? READ THIS FIRST! +# see Note [Edit Symbolic Files] in README.md + +# This file exports ONNX ops for opset 12 + +__all__ = [ + "argmax", + "argmin", + "binary_cross_entropy_with_logits", + "celu", + "cross_entropy_loss", + "dropout", + "einsum", + "ge", + "le", + "native_dropout", + "nll_loss", + "nll_loss2d", + "nll_loss_nd", + "outer", + "pow", + "tensordot", + "unfold", +] + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=12) + + +@_beartype.beartype +def _einsum_helper(g: jit_utils.GraphContext, equation, tensors): + if not tensors: + raise RuntimeError("Einsum inputs are empty.") + # ONNX does not support bool for Einsum inputs. 
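+    # Bool inputs are therefore cast to INT64 before Einsum and the result is cast back to
+    # BOOL below, so e.g. einsum("ij,jk->ik", ...) on two bool tensors still yields a bool output.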
+ if symbolic_helper._is_bool(tensors[0]): + tensors = [ + g.op("Cast", tensor, to_i=_C_onnx.TensorProtoDataType.INT64) + for tensor in tensors + ] + return g.op( + "Cast", + g.op("Einsum", *tensors, equation_s=equation), + to_i=_C_onnx.TensorProtoDataType.BOOL, + ) + else: + return g.op("Einsum", *tensors, equation_s=equation) + + +@_onnx_symbolic("aten::einsum") +@symbolic_helper.parse_args("s", "v", "is") +@_beartype.beartype +def einsum(g: jit_utils.GraphContext, equation, tensor_list, path=None): + tensors = symbolic_helper._unpack_list(tensor_list) + return _einsum_helper(g, equation, tensors) + + +@_onnx_symbolic("aten::outer") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def outer(g: jit_utils.GraphContext, input, other): + # make sure to cast other to self's type + if _type_utils.JitScalarType.from_value( + other, _type_utils.JitScalarType.UNDEFINED + ) != _type_utils.JitScalarType.from_value(input): + other = g.op( + "Cast", + other, + to_i=_type_utils.JitScalarType.from_value(input).onnx_type(), + ) + return _einsum_helper(g, "i,j->ij", [input, other]) + + +@_beartype.beartype +def _dropout_returns_masked_input_and_mask( + g: jit_utils.GraphContext, input: torch._C.Value, p: float, train: bool +) -> Tuple[torch._C.Value, Optional[torch._C.Value]]: + symbolic_helper.check_training_mode(train, "dropout") + # In eval mode, dropout is non-op. That is, if the node's + # train param is set to False, dropout just returns its inputs. + if not train: + return input, None + p = g.op("Constant", value_t=torch.tensor(p)) + t = g.op("Constant", value_t=torch.tensor(train, dtype=torch.bool)) + r, mask = g.op("Dropout", input, p, t, outputs=2) + return r, mask + + +@_onnx_symbolic("aten::dropout") +@symbolic_helper.parse_args("v", "f", "b") +@_beartype.beartype +def dropout(g: jit_utils.GraphContext, input, p, train): + masked, _ = _dropout_returns_masked_input_and_mask(g, input, p, train) + return masked + + +@_onnx_symbolic("aten::native_dropout") +@symbolic_helper.parse_args("v", "f", "b") +@_beartype.beartype +def native_dropout(g: jit_utils.GraphContext, input, p, train): + return _dropout_returns_masked_input_and_mask(g, input, p, train) + + +@_onnx_symbolic("aten::nll_loss") +@_beartype.beartype +def nll_loss(g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index): + # none reduction : onnx::Constant[value={0}] + # mean reduction : onnx::Constant[value={1}] + # sum reduction : onnx::Constant[value={2}] + reduction = symbolic_helper._maybe_get_const(reduction, "i") + reduction_vals = ["none", "mean", "sum"] + reduction = reduction_vals[reduction] + + # in onnx NegativeLogLikelihoodLoss specification, ignore_index is optional without default value. + # therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100). 
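+    # e.g. with reduction="mean" and the PyTorch default ignore_index=-100, the emitted
+    # NegativeLogLikelihoodLoss node carries reduction="mean" and ignore_index=-100 as attributes.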
+ ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i") + if weight.node().mustBeNone(): + nllloss = g.op( + "NegativeLogLikelihoodLoss", + self, + target, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + else: + nllloss = g.op( + "NegativeLogLikelihoodLoss", + self, + target, + weight, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + + return nllloss + + +@_onnx_symbolic("aten::nll_loss2d") +@_beartype.beartype +def nll_loss2d( + g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index +): + return nll_loss(g, self, target, weight, reduction, ignore_index) + + +@_onnx_symbolic("aten::nll_loss_nd") +@_beartype.beartype +def nll_loss_nd( + g: jit_utils.GraphContext, self, target, weight, reduction, ignore_index +): + return nll_loss(g, self, target, weight, reduction, ignore_index) + + +@_onnx_symbolic("aten::cross_entropy_loss") +@_beartype.beartype +def cross_entropy_loss( + g: jit_utils.GraphContext, + self, + target, + weight, + reduction, + ignore_index, + label_smoothing, +): + # none reduction : onnx::Constant[value={0}] + # mean reduction : onnx::Constant[value={1}] + # sum reduction : onnx::Constant[value={2}] + reduction = symbolic_helper._maybe_get_const(reduction, "i") + reduction_vals = ["none", "mean", "sum"] + reduction = reduction_vals[reduction] + + label_smoothing = symbolic_helper._maybe_get_const(label_smoothing, "f") + if label_smoothing is not None and label_smoothing > 0.0: + raise errors.SymbolicValueError( + "Unsupported: ONNX does not support label_smoothing", self + ) + + # in onnx SoftmaxCrossEntropyLoss specification, ignore_index is optional without default value. + # therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100). 
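+    # As with nll_loss above, the default ignore_index=-100 is still written onto the
+    # SoftmaxCrossEntropyLoss node as an attribute.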
+ ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i") + if weight.node().mustBeNone(): + celoss = g.op( + "SoftmaxCrossEntropyLoss", + self, + target, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + else: + celoss = g.op( + "SoftmaxCrossEntropyLoss", + self, + target, + weight, + reduction_s=reduction, + ignore_index_i=ignore_index, + ) + + return celoss + + +@_onnx_symbolic("aten::binary_cross_entropy_with_logits") +@symbolic_helper.parse_args("v", "v", "v", "v", "i") +@_beartype.beartype +def binary_cross_entropy_with_logits( + g: jit_utils.GraphContext, input, target, weight, pos_weight, reduction +): + p = g.op("Constant", value_t=torch.tensor([1])) + sig_x = opset9.sigmoid(g, input) + log_sig_x = opset9.log(g, sig_x) + sub_1_x = opset9.sub(g, p, sig_x) + sub_1_y = opset9.sub(g, p, target) + log_1_x = opset9.log(g, sub_1_x) + if pos_weight is None or symbolic_helper._is_none(pos_weight): + output = opset9.neg( + g, + opset9.add( + g, opset9.mul(g, target, log_sig_x), opset9.mul(g, sub_1_y, log_1_x) + ), + ) + else: + output = opset9.neg( + g, + opset9.add( + g, + opset9.mul(g, opset9.mul(g, target, log_sig_x), pos_weight), + opset9.mul(g, sub_1_y, log_1_x), + ), + ) + + if weight is not None and not symbolic_helper._is_none(weight): + output = opset9.mul(g, weight, output) + + reduction = symbolic_helper._maybe_get_const(reduction, "i") + if reduction == 0: + return output + elif reduction == 1: + return g.op("ReduceMean", output, keepdims_i=0) + elif reduction == 2: + return g.op("ReduceSum", output, keepdims_i=0) + else: + return symbolic_helper._onnx_unsupported( + "binary_cross_entropy_with_logits with reduction other than none, mean, or sum", + input, + ) + + +@_onnx_symbolic("aten::celu") +@_beartype.beartype +def celu(g: jit_utils.GraphContext, self, alpha): + alpha = symbolic_helper._maybe_get_const(alpha, "f") + # if the input is of type double cast it to float + if ( + _type_utils.JitScalarType.from_value(self, _type_utils.JitScalarType.UNDEFINED) + == _type_utils.JitScalarType.DOUBLE + ): + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT) + out = g.op("Celu", self, alpha_f=alpha) + return g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.DOUBLE) + + return g.op("Celu", self, alpha_f=alpha) + + +@_onnx_symbolic("aten::argmax") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmax( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMax") + + +@_onnx_symbolic("aten::argmin") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmin( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMin") + + +@_onnx_symbolic("aten::pow") +@_beartype.beartype +def pow(g: jit_utils.GraphContext, self, exponent): + return g.op("Pow", self, exponent) + + +@_onnx_symbolic("aten::ge") +@_beartype.beartype +def ge(g: jit_utils.GraphContext, input, other): + return g.op("GreaterOrEqual", input, other) + + +@_onnx_symbolic("aten::le") +@_beartype.beartype +def le(g: jit_utils.GraphContext, input, other): + return g.op("LessOrEqual", input, other) + + +@_onnx_symbolic("aten::unfold") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def unfold(g: jit_utils.GraphContext, input, dimension, size, step): + const_size = symbolic_helper._maybe_get_const(size, 
"i") + const_step = symbolic_helper._maybe_get_const(step, "i") + if not symbolic_helper._is_value(const_size) and not symbolic_helper._is_value( + const_step + ): + return opset9.unfold(g, input, dimension, const_size, const_step) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("unfold", input, dimension_i=dimension, size_i=size, step_i=step) + + sizedim = symbolic_helper._get_tensor_dim_size(input, dimension) + if sizedim is not None: + low_start = g.op("Constant", value_t=torch.tensor(0)) + low_end = g.op("Constant", value_t=torch.tensor(sizedim)) + hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1)) + low_indices = g.op("Range", low_start, low_end, step) + hi_indices = g.op("Range", size, hi_end, step) + + low_size = symbolic_helper._size_helper( + g, low_indices, g.op("Constant", value_t=torch.tensor(0)) + ) + hi_size = symbolic_helper._size_helper( + g, hi_indices, g.op("Constant", value_t=torch.tensor(0)) + ) + + ndim = symbolic_helper._get_tensor_rank(input) + assert ndim is not None + perm = list(range(0, ndim)) + perm.append(perm.pop(dimension)) + + unsqueeze_list = [] + loop_condition = g.op("Constant", value_t=torch.tensor(1)) + loop_condition = g.op( + "Cast", loop_condition, to_i=_C_onnx.TensorProtoDataType.BOOL + ) + loop_len = g.op("Min", low_size, hi_size) + + loop, (loop_context,), _ = jit_utils.add_op_with_blocks( + g, "Loop", loop_len, loop_condition, n_blocks=1 + ) + + loop_block = loop_context.block + block_input_iter = utils._add_input_to_block(loop_block) + # FIXME(justinchuby): cond is unused? + cond = utils._add_input_to_block(loop_block) + + starts = loop_context.op("Gather", low_indices, block_input_iter) + ends = loop_context.op("Gather", hi_indices, block_input_iter) + axes = loop_context.op("Constant", value_t=torch.tensor([2])) + starts = symbolic_helper._unsqueeze_helper(loop_context, starts, [0]) + ends = symbolic_helper._unsqueeze_helper(loop_context, ends, [0]) + stack = loop_context.op("Slice", input, starts, ends, axes) + + unsqueeze = symbolic_helper._unsqueeze_helper( + loop_context, loop_context.op("Transpose", stack, perm_i=perm), [dimension] + ) + unsqueeze_list.append(unsqueeze) + concat = loop_context.op("Concat", *unsqueeze_list, axis_i=0) + + cond_out = loop_context.op( + "Cast", loop_condition, _C_onnx.TensorProtoDataType.BOOL + ) + utils._add_output_to_block(loop_block, cond_out) + utils._add_output_to_block(loop_block, concat) + + loop_output = loop.node().output() + perm = [0, 1, 2, 3, 4] + perm[0], perm[dimension + 1] = perm[dimension + 1], perm[0] + transpose = g.op("Transpose", loop_output, perm_i=perm) + squeeze = symbolic_helper._squeeze_helper(g, transpose, [0]) + + return squeeze + + return symbolic_helper._unimplemented("Unfold", "input size not accessible") + + +@_onnx_symbolic("aten::tensordot") +@symbolic_helper.parse_args("v", "v", "is", "is", "v") +@_beartype.beartype +def tensordot(g: jit_utils.GraphContext, input_a, input_b, dims_a, dims_b, out=None): + if out is not None: + symbolic_helper._unimplemented( + "Tensordot", "Out parameter is not supported for tensordot." 
+ ) + + dim_count_a = symbolic_helper._get_tensor_rank(input_a) + if dim_count_a is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of tensordot for tensor(input_a) of unknown rank.", + input_a, + ) + + dim_count_b = symbolic_helper._get_tensor_rank(input_b) + if dim_count_b is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of tensordot for tensor(input_b) of unknown rank.", + input_b, + ) + + dims_a = [ + (dims_a[i] + dim_count_a) if (dims_a[i] < 0) else dims_a[i] + for i in range(len(dims_a)) + ] + dims_b = [ + (dims_b[i] + dim_count_b) if (dims_b[i] < 0) else dims_b[i] + for i in range(len(dims_b)) + ] + + left_dims_a = [i for i in range(dim_count_a) if (i not in dims_a)] + left_dims_b = [i for i in range(dim_count_b) if (i not in dims_b)] + + new_input_a = opset9.permute(g, input_a, left_dims_a + dims_a) + new_input_b = opset9.permute(g, input_b, dims_b + left_dims_b) + + input_shape = g.op("Shape", new_input_a) + left_sizes_a = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[0], ends=[len(left_dims_a)] + ) + shape_sizes = [ + left_sizes_a, + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + ] + output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes) + + input_shape = g.op("Shape", output_a) + slices = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize] + ) + shape_sizes = [ + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + slices, + ] + output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes) + + input_shape = g.op("Shape", new_input_b) + left_sizes_b = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[len(dims_b)], ends=[sys.maxsize] + ) + slices = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[0], ends=[len(dims_b)] + ) + shape_sizes = [ + slices, + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + ] + output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes) + + input_shape = g.op("Shape", output_b) + slices = symbolic_helper._slice_helper( + g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize] + ) + shape_sizes = [ + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + slices, + ] + output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes) + + output = einsum(g, "ij,jk->ik", g.op("prim::ListConstruct", *[output_a, output_b])) + + shape_sizes = [left_sizes_a, left_sizes_b] + return opset9._reshape_from_tensor(g, output, shape_sizes) diff --git a/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py new file mode 100644 index 0000000000000000000000000000000000000000..1b4b8ee7917c598cb2392656a5d36c3f666dad59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py @@ -0,0 +1,289 @@ +"""This file exports ONNX ops for opset 14. + +Note [ONNX operators that are added/updated in opset 14] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +New operators: + HardSwish, Trilu + +Updated operators: + Reshape + Add, Sub, Mul, Div + GRU, LSTM, RNN + BatchNorm, Cumsum, Relu +""" + +# EDITING THIS FILE? READ THIS FIRST! 
+# see Note [Edit Symbolic Files] in README.md +from __future__ import annotations + +import functools +from typing import Optional + +import torch +from torch.onnx import _constants, _type_utils, symbolic_helper +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils, registration + +__all__ = [ + "hardswish", + "tril", + "triu", + "reshape", + "batch_norm", + "quantized_hardswish", + "scaled_dot_product_attention", +] + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=14) + + +@_onnx_symbolic("aten::hardswish") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def hardswish(g: jit_utils.GraphContext, self): + return g.op("HardSwish", self) + + +@_onnx_symbolic("aten::tril") +@_beartype.beartype +def tril(g: jit_utils.GraphContext, self, diagonal, out=None): + return g.op("Trilu", self, diagonal, upper_i=0) + + +@_onnx_symbolic("aten::triu") +@_beartype.beartype +def triu(g: jit_utils.GraphContext, self, diagonal, out=None): + return g.op("Trilu", self, diagonal, upper_i=1) + + +@_onnx_symbolic("aten::reshape") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def reshape(g: jit_utils.GraphContext, self, shape): + # NOTE: Due to bug in ORT https://github.com/microsoft/onnxruntime/issues/10664 + # Reshape export cannot utilize the new allowzero attribute introduced in opset 14. + return symbolic_helper._reshape_helper(g, self, shape, allowzero=0) + + +@_onnx_symbolic("aten::batch_norm") +@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i") +@_beartype.beartype +def batch_norm( + g: jit_utils.GraphContext, + input, + weight, + bias, + running_mean, + running_var, + training, + momentum, + eps, + cudnn_enabled, +): + if ( + torch.is_autocast_enabled() + and not symbolic_helper.args_have_same_dtype( + [input, weight, bias, running_mean, running_var] + ) + and GLOBALS.export_onnx_opset_version < 15 + ): + return symbolic_helper._onnx_opset_unsupported_detailed( + "BatchNormalization", + 14, + 15, + "All input tensors must have the same `dtype`." 
+ " Turn off Autocast or export using opset version 15.", + input, + ) + + symbolic_helper.check_training_mode(training, "batch_norm") + weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper( + g, input, weight, bias, running_mean, running_var + ) + out = g.op( + "BatchNormalization", + input, + weight, + bias, + running_mean, + running_var, + epsilon_f=eps, + momentum_f=1 - momentum, + training_mode_i=0 if not training else 1, + outputs=1 if not training else 3, + ) + if not training: + return out + else: + res, new_running_mean, new_running_var = out + new_running_mean.setType(running_mean.type()) + new_running_var.setType(running_var.type()) + return res + + +@_onnx_symbolic("quantized::hardswish") +@_beartype.beartype +def quantized_hardswish(g: jit_utils.GraphContext, x, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = hardswish(g, x) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +# Ported from +# https://github.com/microsoft/onnxscript/blob/6b1b81700b4523f31d8c6d3321e5d8ef5d42b764/onnxscript/function_libs/torch_aten/ops/nn.py#L1504 +# aten_scaled_dot_product_attention +# NOTE: Need op.Trilu +@_onnx_symbolic("aten::scaled_dot_product_attention") +@symbolic_helper.parse_args("v", "v", "v", "v", "f", "b", "v") +@_beartype.beartype +def scaled_dot_product_attention( + g: jit_utils.GraphContext, + query: torch._C.Value, + key: torch._C.Value, + value: torch._C.Value, + attn_mask: Optional[torch._C.Value] = None, + dropout_p: float = 0.0, + is_causal: bool = False, + scale: Optional[torch._C.Value] = None, +): + assert (not is_causal) or ( + is_causal and symbolic_helper._is_none(attn_mask) + ), "is_causal and attn_mask cannot be set at the same time" + + scale = symbolic_helper._maybe_get_const(scale, "f") + if symbolic_helper._is_none(scale): + scale = _attention_scale(g, query) + + if is_causal: + attn_mask = _causal_attention_mask(g, query, key) + + # Swap the last two axes of key + # NOTE: onnx-script has different logic here, because the attribute perms in + # transpose needs list of ints + key_shape_builtin = symbolic_helper._get_tensor_rank(key) + key_transposed_axes = list(range(key_shape_builtin)) + key_transposed_axes[-1], key_transposed_axes[-2] = ( + key_transposed_axes[-2], + key_transposed_axes[-1], + ) + key_transposed = g.op("Transpose", key, perm_i=key_transposed_axes) + + # https://github.com/pytorch/pytorch/blob/12da0c70378b5be9135c6fda62a9863bce4a4818/aten/src/ATen/native/transformers/attention.cpp#L653 + # Scale q, k before matmul for stability see https://tinyurl.com/sudb9s96 for math + query_scaled = g.op("Mul", query, g.op("Sqrt", scale)) + key_transposed_scaled = g.op("Mul", key_transposed, g.op("Sqrt", scale)) + mul_qk = g.op("MatMul", query_scaled, key_transposed_scaled) + + if symbolic_helper._is_none(attn_mask): + mul_qk_add = mul_qk + elif ( + _type_utils.JitScalarType.from_value(attn_mask) + == _type_utils.JitScalarType.BOOL + ): + # Turn the Boolean mask to float: attn_mask.masked_fill(not attn_mask, -float('inf')) + const_zero = g.op("Constant", value_t=torch.tensor([0.0])) + const_neg_inf = g.op("Constant", value_t=torch.tensor([-float("inf")])) + attn_mask = g.op("Where", attn_mask, const_zero, const_neg_inf) + mul_qk_add = g.op("Add", mul_qk, attn_mask) + elif _type_utils.JitScalarType.from_value(attn_mask) in ( + _type_utils.JitScalarType.FLOAT, + _type_utils.JitScalarType.HALF, + _type_utils.JitScalarType.BFLOAT16, + ): + mul_qk_add = g.op("Add", 
mul_qk, attn_mask) + else: + raise ValueError( + f"Unsupported type for attn_mask: {_type_utils.JitScalarType.from_value(attn_mask)}" + ) + + attn_weight = g.op("Softmax", mul_qk_add, axis_i=-1) + + if dropout_p != 0: + attn_weight = g.op( + "Dropout", + attn_weight, + g.op("Constant", value_t=torch.tensor(dropout_p, dtype=torch.float)), + ) + + return g.op("MatMul", attn_weight, value) + + +@_beartype.beartype +def _attention_scale( + g: jit_utils.GraphContext, query: torch._C.Value +) -> torch._C.Value: + """Calculate the scale factor for the attention result. + + Args: + query: Tensor of shape [..., L, E] + + Returns: + Scalar scale factor := 1 / math.sqrt(query.size(-1)) + """ + query_shape = g.op("Shape", query) + query_shape_last = g.op( + "Slice", + query_shape, + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)), + g.op( + "Constant", value_t=torch.tensor([_constants.INT64_MAX], dtype=torch.int64) + ), + ) + embedding_size = g.op( + "Cast", + query_shape_last, + to_i=_type_utils.JitScalarType.from_value(query).onnx_type(), + ) + const_one = g.op("Constant", value_t=torch.tensor([1.0], dtype=torch.float)) + scale = g.op("Div", const_one, g.op("Sqrt", embedding_size)) + # Add a Cast to convert the scale back to original type + scale = g.op( + "Cast", + scale, + to_i=_type_utils.JitScalarType.from_value(query).onnx_type(), + ) + return scale + + +@_beartype.beartype +def _causal_attention_mask( + g: jit_utils.GraphContext, query: torch._C.Value, key: torch._C.Value +) -> torch._C.Value: + """Create a causal mask for the given query and key tensors. + + Equivalent to:: + mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0) + attn_mask = torch.zeros(L, S, dtype=torch.float) + attn_mask = attn_mask.masked_fill(not mask, -float('inf')) + + Args: + query: Tensor of shape [..., L, E] + key: Tensor of shape [..., S, E] + + Returns: + Tensor of shape [L, S] + """ + + query_shape = g.op("Shape", query) + key_shape = g.op("Shape", key) + + last_idx = g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + second_last_idx = g.op("Constant", value_t=torch.tensor([-2], dtype=torch.int64)) + target_length = g.op("Slice", query_shape, second_last_idx, last_idx) + source_length = g.op("Slice", key_shape, second_last_idx, last_idx) + # attn_mask = torch.ones(L, S) := { + size = g.op("Concat", target_length, source_length, axis_i=0) + const_one = g.op("Constant", value_t=torch.tensor([1.0])) + attn_mask = g.op("Expand", const_one, size) + # } + attn_mask = g.op("Trilu", attn_mask, upper_i=0) + # The causal mask has 0s in the lower triangle and -inf in the upper triangle. 
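+    # e.g. for L = S = 3 the resulting mask is [[0, -inf, -inf], [0, 0, -inf], [0, 0, 0]].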
+ const_zero = g.op("Constant", value_t=torch.tensor([0.0])) + const_neg_inf = g.op("Constant", value_t=torch.tensor([-float("inf")])) + attn_mask = g.op( + "Where", g.op("Equal", attn_mask, const_zero), const_neg_inf, const_zero + ) + return attn_mask diff --git a/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py new file mode 100644 index 0000000000000000000000000000000000000000..c7a771c8f894f4394a68da4f42bd6bc26b245f15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py @@ -0,0 +1,470 @@ +""" +Note [ONNX operators that are added/updated from opset 8 to opset 9] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +New operators: + Compress + ConstantOfShape + EyeLike + MaxUnpool + OneHot + Sinh + Cosh + Asinh + Acosh + Atanh + Shrink + IsNaN + Sign + Erf + Scatter + Where + NonZero + TfIdfVectorizer + MeanVarianceNormalization + +Updated operators: + BatchNormalization: removed spatial attribute. + Greater, Less, Constant, MatMul, PRelu, Gemm, Flatten: more data types{integers} supported. + Cast: more data types{string} supported. + Upsample: moved scales from attribute to input. + Scan +""" + +import functools +import warnings + +import torch +from torch._C import _onnx as _C_onnx +from torch.onnx import _type_utils, errors, symbolic_helper, symbolic_opset9 as opset9 +from torch.onnx._internal import jit_utils, registration + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=8) + +block_listed_operators = ( + "nonzero", + "where", + "scatter", + "scatter_add", + "erf", + "sign", + "isnan", + "gather", + "arange", + "masked_fill", + "index_fill", + "index_copy", + "repeat_interleave", + "any", + "all", +) + +for block_listed_op in block_listed_operators: + _onnx_symbolic(f"aten::{block_listed_op}")( + symbolic_helper._block_list_in_opset(block_listed_op) + ) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +@_onnx_symbolic( + "aten::upsample_nearest1d", + decorate=[_apply_params("upsample_nearest1d", 3, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest2d", + decorate=[_apply_params("upsample_nearest2d", 4, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest3d", + decorate=[_apply_params("upsample_nearest3d", 5, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_linear1d", + decorate=[_apply_params("upsample_linear1d", 3, "linear")], +) +@_onnx_symbolic( + "aten::upsample_bilinear2d", + decorate=[_apply_params("upsample_bilinear2d", 4, "linear")], +) +@_onnx_symbolic( + "aten::upsample_trilinear3d", + decorate=[_apply_params("upsample_trilinear3d", 5, "linear")], +) +def _interpolate(name, dim, interpolate_mode): + def symbolic_fn(g, input, output_size, *args): + scales, align_corners = symbolic_helper._get_interpolate_attributes( + g, interpolate_mode, args + ) + symbolic_helper._interpolate_warning(interpolate_mode) + align_corners = symbolic_helper._maybe_get_scalar(align_corners) + if align_corners: + return symbolic_helper._unimplemented(name, "align_corners == True", input) + output_size = symbolic_helper._maybe_get_const(output_size, "is") + if symbolic_helper._is_value(output_size): + return symbolic_helper._unimplemented( + name, "torch._C.Value (output_size) indexing" + ) + if scales is None: + scales = [ + 1.0 + if i < 2 + else 
float(output_size[-(dim - i)]) + / float(input.type().sizes()[-(dim - i)]) + for i in range(0, dim) + ] + return g.op("Upsample", input, mode_s=interpolate_mode, scales_f=scales) + + return symbolic_fn + + +@_onnx_symbolic("aten::__interpolate") +def __interpolate( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, + antialias, +): + align_corners = symbolic_helper._maybe_get_const(align_corners, "b") + if not symbolic_helper._is_none(align_corners) and align_corners: + return symbolic_helper._unimplemented("interpolate", "align_corners == True") + + if not symbolic_helper._is_none(scale_factor) and symbolic_helper._is_value( + scale_factor + ): + return symbolic_helper._unimplemented( + "interpolate", "dynamic scales in opset 8" + ) + + if not symbolic_helper._is_none(size) and symbolic_helper._is_value(size): + return symbolic_helper._unimplemented("interpolate", "dynamic size in opset 8") + + scales, mode = symbolic_helper._interpolate_get_scales_and_mode( + g, input, size, scale_factor, mode, align_corners + ) + return g.op("Upsample", input, mode_s=mode, scales_f=scales) + + +# NOTE: We should create a wrapper for this kind of operation, after resolving the shape/type propagation +# issue for "cast" operators. Some symbolic functions depend on shape information of input tensor, which +# is lost after casting. +def _try_cast_integer_to_float(g: jit_utils.GraphContext, *args): + floating_scalar_types = { + _type_utils.JitScalarType.HALF, + _type_utils.JitScalarType.FLOAT, + _type_utils.JitScalarType.DOUBLE, + } + old_type = None + # Cast the input tensor to Float if its scalarType is known and is not floating number. + # If casting is performed, return the old scalarType, otherwise return None. + arg0_type = _type_utils.JitScalarType.from_value( + args[0], _type_utils.JitScalarType.UNDEFINED + ) + if arg0_type != _type_utils.JitScalarType.UNDEFINED: + old_type = arg0_type + if old_type not in floating_scalar_types: + old_type = old_type.scalar_name() + args = tuple( + g.op("Cast", arg, to_i=_C_onnx.TensorProtoDataType.FLOAT) + for arg in args + ) + else: + return (None,) + args + else: + warnings.warn( + "Only floating datatype is supported for these operators: " + "{Greater, Less, MatMul, PRelu, Gemm, Flatten}. This might cause " + "the onnx model to be incorrect, if inputs have integer datatypes." + ) + return (old_type,) + args + + +def _cast_to_type(g: jit_utils.GraphContext, input, to_type): + if to_type is None: + return input + return getattr(opset9, f"_cast_{to_type}")(g, input, False) + + +def _comparison_operator(g: jit_utils.GraphContext, input, other, op_name): + other = symbolic_helper._maybe_get_scalar(other) + other = symbolic_helper._if_scalar_type_as(other, input) + _, input, other = _try_cast_integer_to_float(g, input, other) + return g.op(op_name, input, other) + + +# NOTE: For symbolics {gt, lt, bmm, matmul, prelu, mm, addmm, view, flatten}, +# integer input type not supported in opset8. Cast to float if possible. 
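+# e.g. Greater/Less on INT64 inputs are emitted as Cast(FLOAT) followed by the comparison
+# (the result is BOOL either way), while Gemm/MatMul/PRelu/Flatten results are cast back to
+# the original integer type via _cast_to_type.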
+@_onnx_symbolic("aten::gt") +def gt(g: jit_utils.GraphContext, input, other): + return _comparison_operator(g, input, other, "Greater") + + +@_onnx_symbolic("aten::lt") +def lt(g: jit_utils.GraphContext, input, other): + return _comparison_operator(g, input, other, "Less") + + +@_onnx_symbolic("aten::bmm") +def bmm(g: jit_utils.GraphContext, self, other): + if symbolic_helper._try_get_scalar_type(self): + old_type, self, other = _try_cast_integer_to_float(g, self, other) + return _cast_to_type(g, g.op("MatMul", self, other), old_type) + else: + return g.op("MatMul", self, other) + + +@_onnx_symbolic("aten::matmul") +def matmul(g: jit_utils.GraphContext, self, other): + return bmm(g, self, other) + + +@_onnx_symbolic("aten::prelu") +def prelu(g: jit_utils.GraphContext, self, weight): + self_rank = symbolic_helper._get_tensor_rank(self) + weight_sizes = symbolic_helper._get_tensor_sizes(weight) + if self_rank is not None and self_rank > 2: + weight = g.op("Unsqueeze", weight, axes_i=list(range(1, self_rank - 1))) + elif self_rank == 0 and weight_sizes == [1]: + # self and weight are both scalar but weight has rank == 1, squeeze weight. + weight = symbolic_helper._squeeze_helper(g, weight, [0]) + if symbolic_helper._try_get_scalar_type(self): + old_type, self, weight = _try_cast_integer_to_float(g, self, weight) + return _cast_to_type(g, g.op("PRelu", self, weight), old_type) + else: + return g.op("PRelu", self, weight) + + +@_onnx_symbolic("aten::mm") +def mm(g: jit_utils.GraphContext, self, other): + # Create a dummy C tensor. Only needed for API purposes, the value is + # since beta = 0 + scalar_type = symbolic_helper._try_get_scalar_type(self, other) + if scalar_type is None: + raise errors.SymbolicValueError( + "mm can only operate on tensors with known types", self + ) + zero_constant = g.op( + "Constant", + value_t=torch.tensor([0], dtype=scalar_type.dtype()), + ) + + if symbolic_helper._try_get_scalar_type(self): + old_type, self, other, zero_constant = _try_cast_integer_to_float( + g, self, other, zero_constant + ) + return _cast_to_type( + g, + g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0), + old_type, + ) + return g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0) + + +@_onnx_symbolic("aten::addmm") +@symbolic_helper.parse_args("v", "v", "v", "t", "t") +def addmm(g: jit_utils.GraphContext, self, mat1, mat2, beta, alpha): + if symbolic_helper._try_get_scalar_type(self): + old_type, self, mat1, mat2 = _try_cast_integer_to_float(g, self, mat1, mat2) + return _cast_to_type( + g, + g.op( + "Gemm", + mat1, + mat2, + self, + beta_f=symbolic_helper._scalar(beta), + alpha_f=symbolic_helper._scalar(alpha), + ), + old_type, + ) + else: + return g.op( + "Gemm", + mat1, + mat2, + self, + beta_f=symbolic_helper._scalar(beta), + alpha_f=symbolic_helper._scalar(alpha), + ) + + +@_onnx_symbolic("aten::flatten") +def flatten(g: jit_utils.GraphContext, input, start_dim, end_dim): + start_dim_i = symbolic_helper._get_const(start_dim, "i", "start_dim") + end_dim_i = symbolic_helper._get_const(end_dim, "i", "end_dim") + + dim = input.type().dim() + if end_dim_i < 0: + end_dim_i = dim + end_dim_i + # use ONNX's Flatten operator for cases where the output shape is 2D + if start_dim_i == 1 and end_dim_i == dim - 1: + if symbolic_helper._try_get_scalar_type(input): + old_type, input = _try_cast_integer_to_float(g, input) + return _cast_to_type( + g, g.op("Flatten", input, axis_i=start_dim_i), old_type + ) + else: + return g.op("Flatten", input, axis_i=start_dim_i) + if 
start_dim_i == 0 and end_dim_i == dim - 2: + if symbolic_helper._try_get_scalar_type(input): + old_type, input = _try_cast_integer_to_float(g, input) + return _cast_to_type( + g, g.op("Flatten", input, axis_i=end_dim_i + 1), old_type + ) + else: + return g.op("Flatten", input, axis_i=end_dim_i + 1) + + return opset9.flatten(g, input, start_dim, end_dim) + + +def _constant_fill(g: jit_utils.GraphContext, sizes, dtype: int, const_value): + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + if not scalar_type.dtype().is_floating_point: + result = g.op( + "ConstantFill", + sizes, + dtype_i=_type_utils.JitScalarType.FLOAT.onnx_type(), + input_as_shape_i=1, + value_f=const_value, + ) + return g.op("Cast", result, to_i=scalar_type.onnx_type()) + else: + return g.op( + "ConstantFill", + sizes, + dtype_i=scalar_type.onnx_type(), + input_as_shape_i=1, + value_f=const_value, + ) + + +@_onnx_symbolic("aten::empty") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def empty( + g: jit_utils.GraphContext, + sizes, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + return zeros(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::empty_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def empty_like( + g: jit_utils.GraphContext, + input, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + return zeros_like(g, input, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::zeros") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +def zeros(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + # NOTE: no way to set device and layout in ONNX, so we ignore it + return _constant_fill(g, sizes, dtype, 0) + + +@_onnx_symbolic("aten::zeros_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def zeros_like( + g: jit_utils.GraphContext, + input, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + return _constant_fill(g, shape, dtype, 0) + + +@_onnx_symbolic("aten::ones") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +def ones(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + return _constant_fill(g, sizes, dtype, 1) + + +@_onnx_symbolic("aten::ones_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def ones_like( + g: jit_utils.GraphContext, + input, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + return _constant_fill(g, shape, dtype, 1) + + +@_onnx_symbolic("aten::full") +def full( + g: jit_utils.GraphContext, sizes, value, dtype, layout, device, pin_memory=False +): + const_value = symbolic_helper._maybe_get_const(value, "t") + if symbolic_helper._is_value(const_value): + tmp = zeros(g, sizes, dtype, layout, device) + return opset9.add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1))) + else: + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + return _constant_fill(g, sizes, dtype, const_value) + + +@_onnx_symbolic("aten::full_like") +@symbolic_helper.parse_args("v", "f", "i", "v", "v", "v", "v") +def full_like( + g: jit_utils.GraphContext, + input, + fill_value, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + return _constant_fill(g, shape, dtype, fill_value) + + +@_onnx_symbolic("aten::repeat") +def repeat(g: jit_utils.GraphContext, self, repeats): + 
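+    # aten::repeat lowers to onnx::Tile; when `repeats` has more entries than the input has
+    # dimensions, the input is first viewed with leading 1s (e.g. repeating a (2, 3) tensor
+    # with repeats (2, 1, 1) views it as (1, 2, 3) before Tile).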
if not symbolic_helper._is_value(repeats): + repeats = g.op("Constant", value_t=torch.LongTensor(repeats)) + if symbolic_helper._is_packed_list(repeats): + repeat_size_len = len(symbolic_helper._unpack_list(repeats)) + else: + const_repeats = symbolic_helper._maybe_get_const(repeats, "is") + repeat_size_len = len(const_repeats) + if self.isCompleteTensor(): + sizes = self.type().sizes() + diff_dims = repeat_size_len - len(sizes) + if diff_dims > 0: + self = opset9.view( + g, self, g.op("Constant", value_t=torch.tensor([1] * diff_dims + sizes)) + ) + return g.op("Tile", self, repeats) diff --git a/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py new file mode 100644 index 0000000000000000000000000000000000000000..81a6862ca47659e5d038fde80028de0a981261c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset9.py @@ -0,0 +1,7207 @@ +"""This file exports ONNX ops for opset 9. + +Opset 9 is supported by ONNX release 1.4.1 +release on 01/23/19 +""" +from __future__ import annotations + +import builtins +import functools +import math +import sys +import warnings +from typing import Callable, List, Optional, Sequence, Tuple, Union + +import torch +import torch._C._onnx as _C_onnx +import torch.nn.modules.utils +import torch.onnx +from torch import _C + +# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics +from torch.onnx import _constants, _deprecation, _type_utils, errors, symbolic_helper +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils, registration +from torch.types import Number + +# EDITING THIS FILE? READ THIS FIRST! +# see Note [Edit Symbolic Files] in README.md + +__all__ = [ + "abs", + "acos", + "add", + "addcmul", + "addmm", + "alias", + "amax", + "amin", + "aminmax", + "arange", + "argmax", + "argmin", + "as_strided", + "as_tensor", + "asin", + "atan", + "atan2", + "baddbmm", + "batch_norm", + "bernoulli", + "bitwise_not", + "bitwise_or", + "bmm", + "broadcast_tensors", + "broadcast_to", + "bucketize", + "cat", + "cdist", + "ceil", + "clamp_max", + "clamp_min", + "clamp", + "clone", + "constant_pad_nd", + "contiguous", + "conv_tbc", + "conv_transpose1d", + "conv_transpose2d", + "conv_transpose3d", + "conv1d", + "conv2d", + "conv3d", + "convert_element_type", + "convolution", + "cos", + "cosine_similarity", + "cross", + "cumsum", + "detach", + "dim", + "div", + "dot", + "dropout", + "elu", + "embedding_bag", + "embedding", + "empty_like", + "empty", + "eq", + "erf", + "exp", + "expand_as", + "expand", + "eye", + "fill", + "flatten", + "floor_divide", + "floor", + "floordiv", + "frobenius_norm", + "full_like", + "full", + "gather", + "ge", + "gelu", + "get_pool_ceil_padding", + "glu", + "group_norm", + "gt", + "hann_window", + "hardshrink", + "hardsigmoid", + "hardswish", + "hardtanh", + "index_add", + "index_copy", + "index_fill", + "index_put", + "index_select", + "index", + "instance_norm", + "is_floating_point", + "is_pinned", + "isnan", + "item", + "kl_div", + "layer_norm", + "le", + "leaky_relu", + "lerp", + "lift", + "linalg_cross", + "linalg_matrix_norm", + "linalg_norm", + "linalg_vector_norm", + "linear", + "linspace", + "log_sigmoid", + "log_softmax", + "log", + "log10", + "log1p", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logit", + "logsumexp", + "lstm_cell", + "lstm", + "lt", + "masked_fill", + "masked_fill_", + "matmul", + "max_pool1d_with_indices", + 
"max_pool2d_with_indices", + "max_pool3d_with_indices", + "max", + "maximum", + "meshgrid", + "min", + "minimum", + "mish", + "mm", + "movedim", + "mse_loss", + "mul", + "multinomial", + "mv", + "narrow", + "native_layer_norm", + "ne", + "neg", + "new_empty", + "new_full", + "new_ones", + "new_zeros", + "nonzero_numpy", + "nonzero", + "norm", + "numel", + "numpy_T", + "one_hot", + "ones_like", + "ones", + "onnx_placeholder", + "overload_by_arg_count", + "pad", + "pairwise_distance", + "permute", + "pixel_shuffle", + "pixel_unshuffle", + "pow", + "prelu", + "prim_constant_chunk", + "prim_constant_split", + "prim_constant", + "prim_data", + "prim_device", + "prim_dtype", + "prim_if", + "prim_layout", + "prim_list_construct", + "prim_list_unpack", + "prim_loop", + "prim_max", + "prim_min", + "prim_shape", + "prim_tolist", + "prim_tuple_construct", + "prim_type", + "prim_unchecked_cast", + "prim_uninitialized", + "rand_like", + "rand", + "randint_like", + "randint", + "randn_like", + "randn", + "reciprocal", + "reflection_pad", + "relu", + "relu6", + "remainder", + "repeat_interleave", + "repeat", + "replication_pad", + "reshape_as", + "reshape", + "roll", + "rrelu", + "rsqrt", + "rsub", + "scalar_tensor", + "scatter_add", + "scatter", + "select", + "selu", + "sigmoid", + "sign", + "silu", + "sin", + "size", + "slice", + "softmax", + "softplus", + "softshrink", + "sort", + "split_with_sizes", + "split", + "sqrt", + "square", + "squeeze", + "stack", + "std_mean", + "std", + "sub", + "t", + "take", + "tan", + "tanh", + "tanhshrink", + "tensor", + "threshold", + "to", + "topk", + "transpose", + "true_divide", + "type_as", + "unbind", + "unfold", + "unsafe_chunk", + "unsafe_split_with_sizes", + "unsafe_split", + "unsqueeze", + "unsupported_complex_operators", + "noop_complex_operators", + "unused", + "var_mean", + "var", + "view_as", + "view", + "where", + "wrap_logical_op_with_cast_to", + "wrap_logical_op_with_negation", + "zeros_like", + "zeros", + "zero", +] + + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=9) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +def _export(name: str): + """Exports the function in the current global namespace.""" + + def wrapper(func): + globals()[name] = func + __all__.append(name) + return func + + return wrapper + + +@_beartype.beartype +def unused(g): + """Represents "missing" optional inputs.""" + n = g.op("prim::Constant") + n.setType(_C.OptionalType.ofTensor()) + return n + + +@_onnx_symbolic("aten::_shape_as_tensor") +@_beartype.beartype +def _shape_as_tensor(g: jit_utils.GraphContext, input): + return g.op("Shape", input) + + +@_onnx_symbolic("aten::_reshape_from_tensor") +@_beartype.beartype +def _reshape_from_tensor(g: jit_utils.GraphContext, input, shape): + if isinstance(shape, list): + shape = g.op("Concat", *shape, axis_i=0) + return reshape(g, input, shape) + + +@_onnx_symbolic("aten::reshape") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def reshape(g: jit_utils.GraphContext, self, shape): + return symbolic_helper._reshape_helper(g, self, shape) + + +@_onnx_symbolic("aten::reshape_as") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def reshape_as(g: jit_utils.GraphContext, self, other): + shape = g.op("Shape", other) + return reshape(g, self, shape) + + +@_onnx_symbolic("aten::add") +@_beartype.beartype +def add(g: 
jit_utils.GraphContext, self, other, alpha=None): + if symbolic_helper._is_value(self) and symbolic_helper._is_tensor_list(self): + return symbolic_helper._onnx_opset_unsupported_detailed( + "Add", 9, 11, "Add between list of tensors not supported", self + ) + if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1: + other = g.op("Mul", other, alpha) + return g.op("Add", self, other) + + +@_onnx_symbolic("aten::sub") +@_beartype.beartype +def sub(g: jit_utils.GraphContext, self, other, alpha=None): + if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1: + other = g.op("Mul", other, alpha) + return g.op("Sub", self, other) + + +@_onnx_symbolic("aten::rsub") +@_beartype.beartype +def rsub(g: jit_utils.GraphContext, self, other, alpha=None): + return sub(g, other, self, alpha=alpha) + + +@_onnx_symbolic("aten::mul") +@_beartype.beartype +def mul(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_bool(self) and symbolic_helper._is_bool(other): + # ONNX Mul doesn't support Boolean, so use And as an equivalent operator. + return g.op("And", self, other) + else: + return g.op("Mul", self, other) + + +@_onnx_symbolic("aten::div") +@_beartype.beartype +def div(g: jit_utils.GraphContext, self, other, *args): + if len(args) == 0: + return true_divide(g, self, other) + else: + return _div_rounding_mode(g, self, other, *args) + + +@_onnx_symbolic("aten::addcmul") +@symbolic_helper.parse_args("v", "v", "v", "f") +@_beartype.beartype +def addcmul(g: jit_utils.GraphContext, self, tensor1, tensor2, value=1.0): + value_tens = g.op("Constant", value_t=torch.tensor([value])) + return add(g, self, mul(g, mul(g, tensor1, tensor2), value_tens)) + + +@symbolic_helper.parse_args("v", "v", "s") +@_beartype.beartype +def _div_rounding_mode(g: jit_utils.GraphContext, self, other, rounding_mode): + if rounding_mode is None: + return true_divide(g, self, other) + elif rounding_mode == "floor": + return _floor_divide(g, self, other) + elif rounding_mode == "trunc": + return _trunc_divide(g, self, other) + else: + raise errors.SymbolicValueError( + f'Unsupported rounding mode: "{rounding_mode}". Expected None, "floor" or "trunc"', + self, + ) + + +@_beartype.beartype +def _trunc_divide(g: jit_utils.GraphContext, self, other): + out = g.op("Div", self, other) + # the correct operation is truncate, which is not supported in ONNX, + # we cannot call floor since it will behave differently for negative numbers + # (eg. 
-0.1 should become -0 ) + # - if scalar_type information are not available, assume that + # we need to call floor (treat as float) + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.INT64) + + # Matching PyTorch's behavior: + # - if self is fp the output's type is self's type + # - if self is not fp and other is fp, the output is of type JitScalarType.FLOAT + # - self is not fp and other is not fp, the output's type is self's output type + # - the output type defaults to Float + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) + if scalar_type != _type_utils.JitScalarType.UNDEFINED: + if not symbolic_helper._is_fp(self) and symbolic_helper._is_fp(other): + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT) + else: + out = g.op( + "Cast", + out, + to_i=scalar_type.onnx_type(), + ) + else: + out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return out + + +@_beartype.beartype +def _floor_divide(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other): + out = true_divide(g, self, other) + return g.op("Floor", out) + else: + # Integer division does trunction rounding + div = g.op("Div", self, other) + # Division is negative if: self < 0 != other < 0 + zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)) + negative = g.op( + "Xor", + symbolic_helper._lt_helper(g, self, zero), + symbolic_helper._lt_helper(g, other, zero), + ) + + # For negative numbers with self % other != 0, subtract 1 to round down instead of up + mod = g.op("Sub", self, g.op("Mul", div, other)) + fixup_mask = g.op("And", negative, g.op("Not", g.op("Equal", mod, zero))) + + one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + fixup = g.op("Mul", fixup_mask, one) + return g.op("Sub", div, fixup) + + +@_onnx_symbolic("aten::floor_divide") +@_beartype.beartype +def floor_divide(g: jit_utils.GraphContext, self, other): + # Deprecated behavior, floor_divide actually truncates + return _trunc_divide(g, self, other) + + +@_onnx_symbolic("aten::floordiv") +@_beartype.beartype +def floordiv(g: jit_utils.GraphContext, self, other): + return floor_divide(g, self, other) + + +@_onnx_symbolic("aten::true_divide") +@_beartype.beartype +def true_divide(g: jit_utils.GraphContext, self, other): + """Division where both inputs are cast to floating types + + If both inputs are floating, performs div as usual + If only one input is a floating type, the other input is cast to its type + If neither input is a floating type, both inputs are cast to the default scalar type + """ + + # Case 1: either values are floating + # Performs div as usual. + # Implicit casting will be handled in scalar type analysis pass. + if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other): + return g.op("Div", self, other) + + # Case 2: neither is floating + # Casts both inputs to the default scalar type + scalar_type = torch.get_default_dtype() + onnx_scalar_type = _C_onnx.TensorProtoDataType.FLOAT + assert scalar_type is torch.float or scalar_type is torch.double + if torch.get_default_dtype() is torch.double: + onnx_scalar_type = _C_onnx.TensorProtoDataType.DOUBLE + + self = g.op("Cast", self, to_i=onnx_scalar_type) + other = g.op("Cast", other, to_i=onnx_scalar_type) + return g.op("Div", self, other) + + +@_onnx_symbolic("aten::reciprocal") +@_beartype.beartype +def reciprocal(g: jit_utils.GraphContext, self): + # torch.reciprocal implicitly casts to float, so we do the same. 
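+    # e.g. torch.reciprocal(torch.tensor([2])) yields tensor([0.5000]); the Cast to FLOAT
+    # below mirrors that promotion.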
+ if not symbolic_helper._is_fp(self): + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return g.op("Reciprocal", self) + + +@_onnx_symbolic("aten::cat") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def cat(g: jit_utils.GraphContext, tensor_list, dim): + tensors = symbolic_helper._unpack_list(tensor_list) + # torch.cat ignores empty tensors such as `torch.Tensor([])` + # These needs to be removed as input from ONNX's concat too, otherwise shape inference + # will likely fail due to inputs with different ranks (0 for empty tensor, > 0 for anything else) + nonempty_tensors = [] + for t in tensors: + if symbolic_helper._is_constant(t) and not symbolic_helper._get_tensor_dim_size( + t, 0 + ): + continue + nonempty_tensors.append(t) + assert len(nonempty_tensors) > 0 + assert all( + symbolic_helper._get_tensor_rank(nonempty_tensors[0]) is None + or symbolic_helper._get_tensor_rank(t) is None + or symbolic_helper._get_tensor_rank(t) + == symbolic_helper._get_tensor_rank(nonempty_tensors[0]) + for t in nonempty_tensors + ) + tensor_list.node().removeAllInputs() + for t in nonempty_tensors: + tensor_list.node().addInput(t) + + tensors = symbolic_helper._unpack_list(tensor_list) + return g.op("Concat", *tensors, axis_i=dim) + + +@_onnx_symbolic("aten::stack") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def stack(g: jit_utils.GraphContext, tensor_list, dim): + unsqueezed = [ + symbolic_helper._unsqueeze_helper(g, t, [dim]) + for t in symbolic_helper._unpack_list(tensor_list) + ] + return g.op("Concat", *unsqueezed, axis_i=dim) + + +@_onnx_symbolic("aten::list") +@_beartype.beartype +def _list(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("aten::mm") +@_beartype.beartype +def mm(g: jit_utils.GraphContext, self, other): + # Create a dummy C tensor. 
Only needed for API purposes, the value is + # since beta = 0 + C = g.op("Constant", value_t=torch.tensor([1])) + return g.op("Gemm", self, other, C, beta_f=0.0, alpha_f=1.0) + + +@_onnx_symbolic("aten::bmm") +@_beartype.beartype +def bmm(g: jit_utils.GraphContext, self, other): + return g.op("MatMul", self, other) + + +@_onnx_symbolic("aten::matmul") +@_beartype.beartype +def matmul(g: jit_utils.GraphContext, self, other): + return g.op("MatMul", self, other) + + +@_onnx_symbolic("aten::addmm") +@symbolic_helper.parse_args("v", "v", "v", "t", "t") +@_beartype.beartype +def addmm(g: jit_utils.GraphContext, self, mat1, mat2, beta, alpha): + scalar_type = None + self_scalar_type = symbolic_helper._try_get_scalar_type(self) + mat1_scalar_type = symbolic_helper._try_get_scalar_type(mat1) + mat2_scalar_type = symbolic_helper._try_get_scalar_type(mat2) + if self_scalar_type is not None: + scalar_type = self_scalar_type + elif mat1_scalar_type is not None: + scalar_type = mat1_scalar_type + elif mat2_scalar_type is not None: + scalar_type = mat2_scalar_type + + mat1_rank = symbolic_helper._get_tensor_rank(mat1) + mat2_rank = symbolic_helper._get_tensor_rank(mat2) + + def is_not_none_nor(v, u): + return v is not None and v != u + + if scalar_type is not None and ( + is_not_none_nor(mat1_rank, 2) or is_not_none_nor(mat2_rank, 2) + ): + res1 = g.op("MatMul", mat1, mat2) + res2 = self + + alpha = symbolic_helper._scalar(alpha) + beta = symbolic_helper._scalar(beta) + + if alpha != 1: + alpha = g.op( + "Constant", value_t=torch.tensor(alpha, dtype=scalar_type.dtype()) + ) + res1 = g.op("Mul", res1, alpha) + if beta != 1: + beta = g.op( + "Constant", + value_t=torch.tensor( + symbolic_helper._scalar(beta), dtype=scalar_type.dtype() + ), + ) + res2 = g.op("Mul", res2, beta) + + return g.op("Add", res1, res2) + + return g.op( + "Gemm", + mat1, + mat2, + self, + beta_f=symbolic_helper._scalar(beta), + alpha_f=symbolic_helper._scalar(alpha), + ) + + +@_onnx_symbolic("aten::neg") +@_beartype.beartype +def neg(g: jit_utils.GraphContext, self): + return g.op("Neg", self) + + +@_onnx_symbolic("aten::sqrt") +@_beartype.beartype +def sqrt(g: jit_utils.GraphContext, self): + if _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) in { + _type_utils.JitScalarType.UINT8, + _type_utils.JitScalarType.INT8, + _type_utils.JitScalarType.INT16, + _type_utils.JitScalarType.INT, + _type_utils.JitScalarType.INT64, + }: + # torch converts all int inputs to sqrt to float + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT) + + return g.op("Sqrt", self) + + +@_onnx_symbolic("aten::rsqrt") +@_beartype.beartype +def rsqrt(g: jit_utils.GraphContext, self): + return g.op( + "Div", symbolic_helper._if_scalar_type_as(torch.ones(1), self), sqrt(g, self) + ) + + +@_onnx_symbolic("aten::tanh") +# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qtanh.cpp +@symbolic_helper.quantized_args(True, scale=2.0 / 256.0, zero_point=128) +@_beartype.beartype +def tanh(g: jit_utils.GraphContext, self): + return g.op("Tanh", self) + + +@_onnx_symbolic("aten::sin") +@_beartype.beartype +def sin(g: jit_utils.GraphContext, self): + return g.op("Sin", self) + + +@_onnx_symbolic("aten::cos") +@_beartype.beartype +def cos(g: jit_utils.GraphContext, self): + return g.op("Cos", self) + + +@_onnx_symbolic("aten::tan") +@_beartype.beartype +def tan(g: jit_utils.GraphContext, self): + return g.op("Tan", self) + + +@_onnx_symbolic("aten::asin") +@_beartype.beartype +def asin(g: 
jit_utils.GraphContext, self): + return g.op("Asin", self) + + +@_onnx_symbolic("aten::acos") +@_beartype.beartype +def acos(g: jit_utils.GraphContext, self): + return g.op("Acos", self) + + +@_onnx_symbolic("aten::atan") +@_beartype.beartype +def atan(g: jit_utils.GraphContext, self): + return g.op("Atan", self) + + +@_onnx_symbolic("aten::atan2") +@_beartype.beartype +def atan2(g: jit_utils.GraphContext, self, other): + # self is y, and other is x on coordinate + slope = g.op("Div", self, other) + atan = g.op("Atan", slope) + const_zero = g.op("Constant", value_t=torch.tensor(0)) + const_pi = g.op("Constant", value_t=torch.tensor(math.pi)) + + condition_second_or_third_quadrant = g.op("Greater", self, const_zero) + second_third_quadrant = g.op( + "Where", + condition_second_or_third_quadrant, + g.op("Add", atan, const_pi), + g.op("Sub", atan, const_pi), + ) + + condition_14_or_23_quadrant = g.op("Less", other, const_zero) + result = g.op("Where", condition_14_or_23_quadrant, second_third_quadrant, atan) + + return result + + +@_onnx_symbolic("aten::sigmoid") +# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qsigmoid.cpp +@symbolic_helper.quantized_args(True, scale=1.0 / 256.0, zero_point=0) +@_beartype.beartype +def sigmoid(g: jit_utils.GraphContext, self): + return g.op("Sigmoid", self) + + +@_onnx_symbolic("aten::sign") +@_beartype.beartype +def sign(g: jit_utils.GraphContext, self): + return g.op("Sign", self) + + +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def _slice(g: jit_utils.GraphContext, input, axes, starts, ends): + assert len(starts) == len(ends) + if len(starts) == 1 and starts[0] == 0 and ends[0] == _constants.INT64_MAX: + return input + return g.op("Slice", input, axes_i=axes, starts_i=starts, ends_i=ends) + + +@_beartype.beartype +def _maybe_cast_reduce_op_input(g: jit_utils.GraphContext, self): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.UNDEFINED + ) + if scalar_type != _type_utils.JitScalarType.UNDEFINED: + # This check only covers traced modules where dtype is present + # pytorch reduce-ops cast all other integral types to int64 + if ( + not symbolic_helper._is_fp(self) + and scalar_type != _type_utils.JitScalarType.INT64 + ): + self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.INT64) + return self + + +@_beartype.beartype +def _reduce_op_symbolic(onnx_op_name, allow_multi_dim_support=True): + @_beartype.beartype + def symbolic(g, self, dim=None, keepdim=None): + self = _maybe_cast_reduce_op_input(g, self) + if dim is None or dim == tuple(): + # Dim can be 0, which will cause (not dim) == True. 
So we don't want to do + # (not dim) + # all-reduce path + return symbolic_helper._handle_reduce_dim_none(g, self, onnx_op_name) + else: + # dim-reduce path + desc = "is" if allow_multi_dim_support else "i" + dim, keepdim = symbolic_helper._get_const( + dim, desc, "dim" + ), symbolic_helper._get_const(keepdim, "i", "keepdim") + dim_list = dim if allow_multi_dim_support else [dim] + return g.op(onnx_op_name, self, axes_i=dim_list, keepdims_i=keepdim) + + return symbolic + + +@_beartype.beartype +def overload_by_arg_count(fn): + @functools.wraps(fn) + @_beartype.beartype + def wrapper(g, *args): + overloads = fn(g, *args) + for overload in overloads: + arg_descriptors = overload._arg_descriptors + if len(arg_descriptors) == len(args): + return overload(g, *args) + return symbolic_helper._unimplemented( + f"aten::{fn.__name__}", f"with {len(args)} arguments" + ) + + return wrapper + + +@_onnx_symbolic("aten::sum", decorate=[_apply_params("ReduceSum", "sum")]) +@_onnx_symbolic("aten::mean", decorate=[_apply_params("ReduceMean", "mean")]) +# torch.prod does not support multidimensional "dim" +@_onnx_symbolic( + "aten::prod", + decorate=[_apply_params("ReduceProd", "prod", allow_multi_dim_support=False)], +) +@_beartype.beartype +def _reduce_with_dtype(onnx_op: str, name: str, allow_multi_dim_support: bool = True): + symbolic = _reduce_op_symbolic( + onnx_op, allow_multi_dim_support=allow_multi_dim_support + ) + + @overload_by_arg_count + def reduce(g, *args, **kwargs): + @symbolic_helper.quantized_args(True) + @symbolic_helper.parse_args("v", "none") + def reduce_nodim(g, self, dtype): + dtype_onnx = None + if dtype.node().kind() == "onnx::Constant": + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type() + self = g.op("Cast", self, to_i=dtype_onnx) + elif dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented(name, "dtype", dtype) + result = symbolic(g, self) + if dtype_onnx is not None: + result_dtype_onnx = _type_utils.JitScalarType.from_value( + result + ).onnx_type() + if result_dtype_onnx != dtype_onnx: + result = g.op("Cast", result, to_i=dtype_onnx) + return result + + dim_desc = "is" if allow_multi_dim_support else "i" + + @symbolic_helper.quantized_args(True) + @symbolic_helper.parse_args("v", dim_desc, "i", "none") # type: ignore[arg-type] + def reduce_dim(g, self, dim, keepdim, dtype): + dtype_onnx = None + if dtype.node().kind() == "onnx::Constant": + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + dtype_onnx = _type_utils.JitScalarType(dtype).onnx_type() + self = g.op("Cast", self, to_i=dtype_onnx) + elif dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented(name, "dtype", dtype) + result = symbolic(g, self, dim, keepdim) + if dtype_onnx is not None: + result_dtype_onnx = _type_utils.JitScalarType.from_value( + result + ).onnx_type() + if result_dtype_onnx != dtype_onnx: + result = g.op("Cast", result, to_i=dtype_onnx) + return result + + return reduce_nodim, reduce_dim + + return reduce + + +@_onnx_symbolic("aten::cumsum") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def cumsum(g: jit_utils.GraphContext, input, dim, dtype): + if symbolic_helper.is_caffe2_aten_fallback(): + if dtype.node().kind() != "prim::Constant": + return symbolic_helper._unimplemented("cumsum", "dtype", dtype) + return g.at("cumsum", input, dim_i=dim) + + symbolic_helper._onnx_opset_unsupported("cumsum", 9, 11, input) + + 
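The reduce-op helpers above (_reduce_op_symbolic, overload_by_arg_count, _reduce_with_dtype) lower aten::sum/mean/prod to ONNX ReduceSum/ReduceMean/ReduceProd, while the cumsum symbolic rejects opset 9 because ONNX only gained a CumSum operator in opset 11. A minimal sketch of what this means for a caller (the model and output file name here are illustrative, not part of this diff):

import torch

class CumsumModel(torch.nn.Module):
    def forward(self, x):
        # sum() lowers to ReduceSum through the reduce-op machinery above;
        # cumsum() needs the ONNX CumSum operator introduced in opset 11.
        return x.cumsum(dim=1) + x.sum(dim=1, keepdim=True)

model = CumsumModel()
dummy = torch.randn(2, 4)

# Exporting with opset_version=9 hits the _onnx_opset_unsupported path in the
# cumsum symbolic; opset 11 or later exports successfully.
torch.onnx.export(model, (dummy,), "cumsum_model.onnx", opset_version=11)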
+@_onnx_symbolic("aten::_sample_dirichlet") +@_beartype.beartype +def _sample_dirichlet(g: jit_utils.GraphContext, self, generator): + if symbolic_helper.is_caffe2_aten_fallback(): + if not symbolic_helper._is_none(generator): + return symbolic_helper._unimplemented( + "_sample_dirichlet", "We are not able to export generator", self + ) + return g.at("_sample_dirichlet", self) + return symbolic_helper._onnx_unsupported("_sample_dirichlet", self) + + +@_onnx_symbolic("aten::_standard_gamma") +@_beartype.beartype +def _standard_gamma(g: jit_utils.GraphContext, self, generator): + if symbolic_helper.is_caffe2_aten_fallback(): + if not symbolic_helper._is_none(generator): + return symbolic_helper._unimplemented( + "_standard_gamma", "not able to export generator", self + ) + return g.at("_standard_gamma", self) + + return symbolic_helper._onnx_unsupported("_standard_gamma", self) + + +@_onnx_symbolic("aten::t") +@_beartype.beartype +def t(g: jit_utils.GraphContext, self): + rank = symbolic_helper._get_tensor_rank(self) + if rank is None or rank < 2: + # The transpose of a 1d or 0d tensor is itself. ONNX does not define the behavior + # clearly and onnxruntime fails on these cases. So we add an Identity node to + # mirror the behavior of eager mode. + return g.op("Identity", self) + return g.op("Transpose", self, perm_i=(1, 0)) + + +@_onnx_symbolic("aten::numpy_T") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def numpy_T(g: jit_utils.GraphContext, input): + ndim = symbolic_helper._get_tensor_rank(input) + assert ndim is not None + perm = list(reversed(range(0, ndim))) + return g.op("Transpose", input, perm_i=perm) + + +@_onnx_symbolic("aten::expand") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def expand(g: jit_utils.GraphContext, self, size, implicit): + size = symbolic_helper._maybe_get_const(size, "is") + if not symbolic_helper._is_value(size): + size = g.op("Constant", value_t=torch.LongTensor(size)) + elif symbolic_helper._is_packed_list(size): + # Expand with -1 dim value means dim is unchanged. + # Since onnx::expand supports two-way broadcasting, + # -1 dim value can be exported to onnx as 1 + size = symbolic_helper._reshape_helper( + g, stack(g, size, 0), g.op("Constant", value_t=torch.tensor([-1])) + ) + dtype = _type_utils.JitScalarType.INT64 + ones = ones_like(g, size, dtype) + neg_ones = mul(g, ones, g.op("Constant", value_t=torch.tensor(-1))) + size = where(g, g.op("Equal", size, neg_ones), ones, size) + return g.op("Expand", self, size) + + +@_onnx_symbolic("aten::broadcast_to") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def broadcast_to(g: jit_utils.GraphContext, self, size): + size = symbolic_helper._maybe_get_const(size, "is") + if not symbolic_helper._is_value(size): + size = g.op("Constant", value_t=torch.LongTensor(size)) + elif symbolic_helper._is_packed_list(size): + # Expand with -1 dim value means dim is unchanged. 
+ # Since onnx::expand supports two-way broadcasting, + # -1 dim value can be exported to onnx as 1 + size = symbolic_helper._reshape_helper( + g, stack(g, size, 0), g.op("Constant", value_t=torch.tensor([-1])) + ) + dtype = _type_utils.JitScalarType.INT64 + ones = ones_like(g, size, dtype) + neg_ones = mul(g, ones, g.op("Constant", value_t=torch.tensor(-1))) + size = where(g, g.op("Equal", size, neg_ones), ones, size) + return g.op("Expand", self, size) + + +@_onnx_symbolic("aten::expand_as") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def expand_as(g: jit_utils.GraphContext, self, other): + self_t = symbolic_helper._maybe_get_const(self, "t") + if isinstance(self_t, torch.Tensor): + orig_type = self_t.dtype + self_t = self_t.to(torch.double) + dims = [] + for d in range(self_t.dim()): + if torch.equal(self_t.mean(d).unsqueeze(d).expand_as(self_t), self_t): + dims.append(d) + self = g.op( + "Constant", value_t=self_t.mean(dims, keepdim=True).to(orig_type) + ) + + shape = g.op("Shape", other) + return g.op("Expand", self, shape) + + +@_onnx_symbolic("aten::embedding") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "i", "b", "v") +@_beartype.beartype +def embedding( + g: jit_utils.GraphContext, + weight, + indices, + padding_idx, + scale_grad_by_freq, + sparse, +): + if scale_grad_by_freq and GLOBALS.export_training: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of embedding with scale_grad_by_freq=True " + "for training mode. ONNX does not support scaling the gradients.", + weight, + ) + if padding_idx >= 0 and GLOBALS.export_training: + warnings.warn( + "Warning: ONNX export of embedding with padding_idx >= 0 " + "for training mode. " + "ONNX does not support not updating the embedding vector at padding_idx during training." 
+ ) + + return g.op("Gather", weight, indices) + + +@_onnx_symbolic("aten::embedding_bag") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i") +@_beartype.beartype +def embedding_bag( + g: jit_utils.GraphContext, + embedding_matrix, + indices, + offsets, + scale_grad_by_freq, + mode, + sparse, + per_sample_weights, + include_last_offset, + padding_idx, +): + if not symbolic_helper._is_none(per_sample_weights): + return symbolic_helper._onnx_unsupported( + "embedding_bag with per_sample_weights" + ) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "embedding_bag", + embedding_matrix, + indices, + offsets, + outputs=4, + scale_grad_by_freq_i=scale_grad_by_freq, + mode_i=mode, + sparse_i=sparse, + include_last_offset_i=include_last_offset, + padding_idx_i=padding_idx, + ) + + return symbolic_helper._onnx_unsupported("embedding_bag", embedding_matrix) + + +@_onnx_symbolic("aten::size") +@symbolic_helper.quantized_args(True, quantize_output=False) +@_beartype.beartype +def size(g: jit_utils.GraphContext, self, dim=None): + if dim is None: + return g.op("Shape", self) + if symbolic_helper._maybe_get_const(dim, "i") < 0: + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + dim = symbolic_helper._maybe_get_const(dim, "i") + rank + dim = g.op("Constant", value_t=torch.tensor(dim)) + return symbolic_helper._size_helper(g, self, dim) + + +@_onnx_symbolic("aten::transpose") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def transpose(g: jit_utils.GraphContext, self, dim0, dim1): + if dim0 == dim1: # micro-optimization + return self + + # NB: Transpose in ONNX is actually a Permute + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + axes = list(range(rank)) + axes[dim0], axes[dim1] = axes[dim1], axes[dim0] + return g.op("Transpose", self, perm_i=axes) + elif symbolic_helper.is_caffe2_aten_fallback(): + # if we don't have dim information we cannot + # output a permute so use ATen instead + return g.at("transpose", self, overload_name="int", dim0_i=dim0, dim1_i=dim1) + else: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of transpose for tensor of unknown rank.", + self, + ) + + +@_onnx_symbolic("aten::permute") +@symbolic_helper.parse_args("v", "is") +@_beartype.beartype +def permute(g: jit_utils.GraphContext, self, dims): + if dims == list(range(0, len(dims))): + return self + return g.op("Transpose", self, perm_i=dims) + + +@_onnx_symbolic("aten::view") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def view(g: jit_utils.GraphContext, self, size): + return reshape(g, self, size) + + +@_onnx_symbolic("aten::view_as") +@_beartype.beartype +def view_as(g: jit_utils.GraphContext, self, other): + shape = g.op("Shape", other) + return reshape(g, self, shape) + + +@_onnx_symbolic("aten::unsafe_chunk") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def unsafe_chunk(g: jit_utils.GraphContext, self, chunks, dim, _outputs=None): + if _outputs is None: + return symbolic_helper._onnx_opset_unsupported_detailed( + "unsafe_chunk", 9, 11, "Dynamic number of outputs not supported", self + ) + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + return symbolic_helper._unimplemented( + "unsafe_chunk", "unknown dimension size", self + ) + split_size = (size + chunks - 1) // chunks + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: 
+ splits.append(leftover) + return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::split") +@symbolic_helper.parse_args("v", "v", "i", "i") +@_beartype.beartype +def split(g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None): + if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs): + return symbolic_helper._onnx_opset_unsupported_detailed( + "split", 9, 11, "Dynamic number of outputs not supported", self + ) + split_val = symbolic_helper._node_get(split_size_or_sizes.node(), "value") + if split_val.dim() > 0: + return split_with_sizes(g, self, split_size_or_sizes, dim, _outputs) + split_size = symbolic_helper._get_const(split_size_or_sizes, "i", "split_size") + + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + if _outputs is not None: + size = split_size * _outputs + else: + return symbolic_helper._onnx_opset_unsupported_detailed( + "split", 9, 11, "Unknown dimension size not supported", self + ) + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: + splits.append(leftover) + return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::unsafe_split") +@_beartype.beartype +def unsafe_split( + g: jit_utils.GraphContext, self, split_size_or_sizes, dim, _outputs=None +): + return split(g, self, split_size_or_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::split_with_sizes") +@symbolic_helper.parse_args("v", "is", "i", "i") +@_beartype.beartype +def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): + if not symbolic_helper._is_split_static(split_sizes, _outputs): + return symbolic_helper._onnx_opset_unsupported_detailed( + "split_with_sizes", 9, 11, "Dynamic number of outputs not supported", self + ) + return g.op("Split", self, split_i=split_sizes, axis_i=dim, outputs=_outputs) + + +@_onnx_symbolic("aten::unsafe_split_with_sizes") +@_beartype.beartype +def unsafe_split_with_sizes( + g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None +): + return split_with_sizes(g, self, split_sizes, dim, _outputs) + + +@_onnx_symbolic("aten::unbind") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def unbind(g: jit_utils.GraphContext, self, dim=0, _outputs=None): + if _outputs is None: + return symbolic_helper._onnx_opset_unsupported_detailed( + "unbind", 9, 11, "Dynamic number of outputs not supported", self + ) + + outputs = g.op("Split", self, split_i=[1] * _outputs, axis_i=dim, outputs=_outputs) + outputs = [outputs] if _outputs == 1 else outputs + squeezed_outputs = [ + symbolic_helper._squeeze_helper(g, out, [dim]) for out in outputs + ] + return squeezed_outputs + + +@_onnx_symbolic("aten::select") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "i", "v") +@_beartype.beartype +def select(g: jit_utils.GraphContext, self, dim, index): + index = symbolic_helper._maybe_get_scalar(index) + if (not symbolic_helper._is_value(index)) and (index < 0): + if index == -1: + end_index = _constants.INT64_MAX + else: + end_index = index + 1 + slice_node = symbolic_helper._slice_helper( + g, self, axes=[dim], starts=[index], ends=[end_index] + ) + return symbolic_helper._squeeze_helper(g, slice_node, [dim]) + else: + # FIXME(justinchuby): can index be an int and not a value? 
+ return g.op("Gather", self, index, axis_i=dim) + + +@_onnx_symbolic("aten::square") +@_beartype.beartype +def square(g: jit_utils.GraphContext, self): + return g.op("Mul", self, self) + + +@_onnx_symbolic("aten::squeeze") +@_beartype.beartype +def squeeze(g: jit_utils.GraphContext, self, dim=None): + if dim is None: + return g.op("Squeeze", self) + + squeeze_dim = symbolic_helper._get_const(dim, "i", "dim") + # Handle negative dims + if squeeze_dim < 0: + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + warnings.warn( + "ONNX export squeeze with negative axis " + + str(squeeze_dim) + + " might cause the onnx model to be incorrect. " + + "Negative axis is not supported in ONNX. " + + "Axis is converted to " + + str(squeeze_dim + rank) + + " based on input shape at export time. " + + "Passing an tensor of different rank in execution will be incorrect." + ) + squeeze_dim += rank + else: + return symbolic_helper._unimplemented( + "squeeze", "negative axis with unknown input rank", self + ) + + dim_size = symbolic_helper._get_tensor_dim_size(self, squeeze_dim) + if dim_size is None: + warnings.warn( + "This model contains a squeeze operation on dimension " + + str(squeeze_dim) + + " on an input " + + "with unknown shape. Note that if the size of dimension " + + str(squeeze_dim) + + " of the input " + + "is not 1, the ONNX model will return an error. Opset version 11 supports squeezing on " + + "non-singleton dimensions, it is recommended to export this model using opset " + + "version 11 or higher." + ) + return symbolic_helper._squeeze_helper(g, self, axes_i=[squeeze_dim]) + if dim_size > 1: + warnings.warn( + "This model contains a squeeze operation on dimension " + + str(squeeze_dim) + + ". The size of " + + "this dimension in the given input is " + + str(dim_size) + + ". The model will " + + "be exported without the squeeze node. If the model is intended to be used with dynamic " + + "input shapes, please use opset version 11 to " + + "export the model." + ) + return self + + warnings.warn( + "This model contains a squeeze operation on dimension " + + str(squeeze_dim) + + ". If the model is " + + "intended to be used with dynamic input shapes, please use opset version 11 to export the model." + ) + return symbolic_helper._squeeze_helper(g, self, axes_i=[squeeze_dim]) + + +@_onnx_symbolic("aten::prelu") +@_beartype.beartype +def prelu(g: jit_utils.GraphContext, self, weight): + self_rank = symbolic_helper._get_tensor_rank(self) + weight_sizes = symbolic_helper._get_tensor_sizes(weight) + weight_rank = len(weight_sizes) + if self_rank is not None: + if self_rank > 2: + # make weight unidirectional broadcastable + weight = symbolic_helper._unsqueeze_helper( + g, weight, list(range(1, self_rank - 1)) + ) + elif self_rank == 0 and weight_sizes == [1]: + # self and weight are both scalar but weight has rank == 1, squeeze weight. 
+ weight = symbolic_helper._squeeze_helper(g, weight, [0]) + weight_rank = 0 + + if self_rank is not None and weight_rank is not None: + assert ( + self_rank >= weight_rank + ), f"rank(x) should be >= rank(slope) but got {self_rank} < {weight_rank}" + return g.op("PRelu", self, weight) + + +@_onnx_symbolic("aten::silu") +@_beartype.beartype +def silu(g: jit_utils.GraphContext, input): + return g.op("Mul", input, g.op("Sigmoid", input)) + + +@_onnx_symbolic("aten::mish") +@_beartype.beartype +def mish(g: jit_utils.GraphContext, input): + return g.op("Mul", input, g.op("Tanh", g.op("Softplus", input))) + + +@_beartype.beartype +def _op_with_optional_float_cast(g: jit_utils.GraphContext, op_name, *args, **kwargs): + """Some PyTorch operators (e.g., Clip/Min/ReLU/Pad) are super set of ONNX in terms of data types. + This function maximizes the exportability of PyTorch-ONNX by allowing ONNX-unsupported PyTorch + operator data type. For example, `Cast(Clip(Cast(INPUT)))` can be used to mimic + `Clip(INPUT)` (opset version < 12). + + Args: + g (torch._C.Graph): graph to write the ONNX representation into. + op_name (str): operator name in ONNX. + *args (tuple): operands to the operator. + **kwargs (dict): attributes to the operator along with "opset_before" (optional, None by default) + indicating the smallest opset version to trigger such casting behavior and "target_float_t" + (optional, torch.onnx.JitScalarType.FLOAT by default) indicating the data type of internal operator. + + Returns: + Optional[torch._C.Value, Tuple[torch._C.Value, ...]]: output(s) of the operator. + """ + opset_before = kwargs.pop("opset_before", None) + target_float_t = kwargs.pop("target_float_t", _type_utils.JitScalarType.FLOAT) + + inputs = list(args) + dtype_0 = _type_utils.JitScalarType.from_value(inputs[0]) + + require_cast = not symbolic_helper._is_fp(inputs[0]) and ( + opset_before is None or GLOBALS.export_onnx_opset_version < opset_before + ) + + if require_cast: + for input in inputs: + if input.isCompleteTensor(): + input_scalar_type = _type_utils.JitScalarType.from_value(input) + if input_scalar_type != dtype_0: + raise errors.SymbolicValueError( + f"Inputs of {op_name} must have same dtype." 
+ f"Got {dtype_0.scalar_name()} and {input_scalar_type.scalar_name()}", + input, + ) + for i, input in enumerate(inputs): + if input.isCompleteTensor() and not symbolic_helper._is_fp(input): + inputs[i] = g.op( + "Cast", + input, + to_i=target_float_t.onnx_type(), + ) + + self = g.op(op_name, *inputs, **kwargs) + + if require_cast: + self = g.op("Cast", self, to_i=dtype_0.onnx_type()) + + return self + + +@_onnx_symbolic("aten::relu") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def relu(g: jit_utils.GraphContext, input): + return _op_with_optional_float_cast(g, "Relu", input, opset_before=14) + + +@_onnx_symbolic("aten::relu6") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def relu6(g: jit_utils.GraphContext, input): + return clamp(g, input, 0, 6) + + +@_onnx_symbolic("aten::ceil") +@_beartype.beartype +def ceil(g: jit_utils.GraphContext, input): + return g.op("Ceil", input) + + +@_onnx_symbolic("aten::floor") +@_beartype.beartype +def floor(g: jit_utils.GraphContext, input): + return g.op("Floor", input) + + +@_onnx_symbolic("aten::len") +@_beartype.beartype +def _len(g: jit_utils.GraphContext, self): + sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0]))) + return symbolic_helper._squeeze_helper(g, sz_0, [0]) + + +@_onnx_symbolic("aten::threshold") +@symbolic_helper.parse_args("v", "t", "t") +@_beartype.beartype +def threshold(g: jit_utils.GraphContext, self, threshold, value): + # See Note [Export inplace] + if symbolic_helper._scalar(threshold) != 0: + return symbolic_helper._unimplemented("threshold", "non-zero threshold", self) + if symbolic_helper._scalar(value) != 0: + return symbolic_helper._unimplemented("threshold", "non-zero value", self) + return g.op("Relu", self) + + +@_onnx_symbolic("aten::leaky_relu") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "f", "b") +@_beartype.beartype +def leaky_relu( + g: jit_utils.GraphContext, + input: _C.Value, + negative_slope: float, + inplace: bool = False, +): + # See Note [Export inplace] + return g.op("LeakyRelu", input, alpha_f=negative_slope) + + +@_onnx_symbolic("aten::glu") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def glu(g: jit_utils.GraphContext, input, dim): + dim_size = symbolic_helper._get_tensor_dim_size(input, dim) + if dim_size is not None: + assert dim_size % 2 == 0 + + first, second = g.op("Split", input, axis_i=dim, outputs=2) + return g.op("Mul", first, g.op("Sigmoid", second)) + + +@_onnx_symbolic("aten::softmax") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def softmax(g: jit_utils.GraphContext, input, dim, dtype=None): + # Softmax does normalization at vector level. + # PyTorch and ONNX use different strategies to split the input tensor into vectors. + # Thus dim and axis have different meanings. + # PyTorch slices the input tensor into vectors along the `dim`-th dimension. + # ONNX reshapes the input into a 2-D tensor, and `axis` indicates where the input is coerced. + # If input is a 2 x 3 tensor: + # input = [[1.0, 1.0, 1.0], + # [1.0, 1,0, 1,0]] + # with dim = 0, the result is: + # result = [[0.5, 0.5, 0.5], + # [0.5, 0.5, 0.5]] + # with axis = 0, the result is: + # result = [[0.167, 0.167, 0.167], + # [0.167, 0.167, 0.167]] + # So only when dim and axis both equal to ndim - 1 (the last dimension), + # their semantics are equivalent. + # So use softmax when dim and axis both equal to ndim - 1, + # otherwise transpose the input to put the vectors to be normalized to the last dimension. 
+ # When input rank is not known at export time we compute softmax using a subgraph + # with other operators + input_dim = symbolic_helper._get_tensor_rank(input) + if input_dim is not None: + # TODO: remove this as onnx opset 11 spec allows negative axes + if dim < 0: + dim = input_dim + dim + + is_transpose_required = input_dim != dim + 1 + + if is_transpose_required: + axes = list(range(input_dim)) + axes[dim], axes[-1] = axes[-1], axes[dim] + input = g.op("Transpose", input, perm_i=axes) + dim = input_dim - 1 + + softmax = g.op("Softmax", input, axis_i=dim) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + softmax = g.op( + "Cast", + softmax, + to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type(), + ) + + if is_transpose_required: + softmax = g.op("Transpose", softmax, perm_i=axes) # type: ignore[possibly-undefined] + return softmax + + # Apply max normalization. + input = g.op("Sub", input, g.op("ReduceMax", input, axes_i=[dim], keepdims_i=1)) + + exp = g.op("Exp", input) + sum = symbolic_helper._reducesum_helper(g, exp, axes_i=[dim]) + softmax = g.op("Div", exp, sum) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + softmax = g.op( + "Cast", softmax, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type() + ) + return softmax + + +@_onnx_symbolic("aten::softplus") +@_beartype.beartype +def softplus(g: jit_utils.GraphContext, self, beta, threshold): + beta_const = symbolic_helper._maybe_get_const(beta, "f") + if beta_const != 1: + return g.op("Div", g.op("Softplus", g.op("Mul", self, beta)), beta) + return g.op("Softplus", self) + + +@_onnx_symbolic("aten::get_pool_ceil_padding") +@_beartype.beartype +def get_pool_ceil_padding(input, kernel_size, stride, padding): + # TODO(justinchuby): Looks like this op is deprecated in torch + sizes = symbolic_helper._get_tensor_sizes(input) + dim = sizes[-len(padding) :] if sizes is not None else None + if dim is None or any(i is None for i in dim): + return symbolic_helper._unimplemented( + "get_pool_ceil_padding", "input size not accessible", input + ) + ceiled_output_dim = [ + int(math.ceil((dim[i] + 2 * padding[i] - kernel_size[i]) / float(stride[i]))) + + 1 + for i in range(0, len(padding)) + ] + # ensure last pooling starts inside + ceiled_output_dim = [ + ceiled_output_dim[i] - 1 + if (((ceiled_output_dim[i] - 1) * stride[i]) >= (dim[i] + padding[i])) + else ceiled_output_dim[i] + for i in range(0, len(ceiled_output_dim)) + ] + padding_ceil = [ + 0 + if (stride[i] == 1) + else ( + kernel_size[i] + - (dim[i] + 2 * padding[i] - ((ceiled_output_dim[i] - 1) * stride[i] + 1)) + ) + for i in range(0, len(padding)) + ] + # ensure padding is not > kernel_size + padding_ceil = [ + ( + int(padding_ceil[i]) + if padding_ceil[i] < kernel_size[i] - 1 + else int(kernel_size[i] - 1) + ) + if ((padding_ceil[i] + 2 * padding[i]) >= (kernel_size[i])) + else int(padding_ceil[i]) + for i in range(0, len(padding_ceil)) + ] + return padding_ceil + + +@_onnx_symbolic( + "aten::max_pool1d", + decorate=[ + _apply_params( + "max_pool1d", torch.nn.modules.utils._single, 1, return_indices=False + ), + _export("max_pool1d"), + ], +) +@_onnx_symbolic( + "aten::max_pool2d", + decorate=[ + _apply_params( + "max_pool2d", torch.nn.modules.utils._pair, 2, return_indices=False + ), + _export("max_pool2d"), + ], +) +@_onnx_symbolic( + "aten::max_pool3d", + decorate=[ + _apply_params( + "max_pool3d", 
torch.nn.modules.utils._triple, 3, return_indices=False + ), + _export("max_pool3d"), + ], +) +@_beartype.beartype +def _max_pool(name, tuple_fn, ndims, return_indices): + @symbolic_helper.quantized_args(True, False, False, False, False, False) + @symbolic_helper.parse_args("v", "is", "is", "is", "is", "i") + @_beartype.beartype + def symbolic_fn(g, input, kernel_size, stride, padding, dilation, ceil_mode): + if set(tuple_fn(dilation)) != {1}: + return symbolic_helper._unimplemented(name, "dilation", input) + if not stride: + stride = kernel_size + padding = tuple(tuple_fn(padding)) + if ceil_mode: + padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding) + padding = padding + tuple(a + b for (a, b) in zip(padding_ceil, padding)) + else: + padding = padding * 2 + kwargs = { + "kernel_shape_i": tuple_fn(kernel_size), + "pads_i": padding, + "strides_i": tuple_fn(stride), + } + # easy but hacky way to get flattened indices values + # to be used to convert the indices values to non-flattened. + # In ONNX the indices are computed as a flatten 1-D tensor, + # so the values in indices are in [0, N x C x D1 x ... x Dn). + # To convert the indices to the same format used by Pytorch, + # we first execute a maxpool with a kernel and stride of 1 on the same input. + # This will result in a tensor of indices in which each index will have it's own value. + # Using this tensor as a reference, we extract the first index of each axis and subtract + # it from each index of this axis in the indices to convert. + # This step will result in a tensor were each dimension has values of indices within + # the dimension it is in. + # For more information : + # https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407 + if return_indices: + r, indices = g.op("MaxPool", input, outputs=2, **kwargs) + _, flattened_indices = g.op( + "MaxPool", + input, + outputs=2, + kernel_shape_i=[1 for _ in range(ndims)], + strides_i=[1 for _ in range(ndims)], + ) + # convert indices to have non-flattened indices values + s = symbolic_helper._slice_helper( + g, + flattened_indices, + axes=[2 + i for i in range(ndims)], + starts=list(tuple_fn(0)), + ends=list(tuple_fn(1)), + ) + indices = sub(g, indices, s) + return r, indices + else: + r = g.op("MaxPool", input, outputs=1, **kwargs) + return r + + return symbolic_fn + + +max_pool1d_with_indices = _onnx_symbolic("aten::max_pool1d_with_indices")( + _max_pool( + "max_pool1d_with_indices", + torch.nn.modules.utils._single, + 1, + return_indices=True, + ) +) +max_pool2d_with_indices = _onnx_symbolic("aten::max_pool2d_with_indices")( + _max_pool( + "max_pool2d_with_indices", + torch.nn.modules.utils._pair, + 2, + return_indices=True, + ) +) +max_pool3d_with_indices = _onnx_symbolic("aten::max_pool3d_with_indices")( + _max_pool( + "max_pool3d_with_indices", + torch.nn.modules.utils._triple, + 3, + return_indices=True, + ) +) + + +@_onnx_symbolic( + "aten::avg_pool1d", + decorate=[ + _apply_params("avg_pool1d", torch.nn.modules.utils._single), + _export("avg_pool1d"), + ], +) +@_onnx_symbolic( + "aten::avg_pool2d", + decorate=[ + _apply_params("avg_pool2d", torch.nn.modules.utils._pair), + _export("avg_pool2d"), + ], +) +@_onnx_symbolic( + "aten::avg_pool3d", + decorate=[ + _apply_params("avg_pool3d", torch.nn.modules.utils._triple), + _export("avg_pool3d"), + ], +) +@_beartype.beartype +def _avg_pool(name, tuple_fn): + @symbolic_helper.quantized_args(True) + @symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none") + @_beartype.beartype + def 
symbolic_fn( + g, + input: _C.Value, + kernel_size: Sequence[int], + stride: Sequence[int], + padding: Union[int, Sequence[int]], + ceil_mode: int, + count_include_pad: int, + divisor_override=None, + ): + if not stride: + stride = kernel_size + padding = symbolic_helper._avgpool_helper( + tuple_fn, padding, kernel_size, stride, divisor_override, name + ) + assert isinstance(padding, tuple) + adjusted_padding = padding + # Although onnx::AvgPool provides count_include_pad, + # The corner case of Average Pooling with ceil_mode on + # PyTorch allows sliding window go off bound, which leads to + # this accommodation. + # More detail on https://github.com/pytorch/pytorch/issues/57178 + if count_include_pad: + input = _op_with_optional_float_cast( + g, + "Pad", + input, + pads_i=((0,) * 2 + padding) * 2, + mode_s="constant", + value_f=0.0, + opset_before=11, + ) + adjusted_padding = (0,) * len(padding) + if ceil_mode: + padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding) + adjusted_padding = adjusted_padding + tuple( + a + b for (a, b) in zip(padding_ceil, adjusted_padding) + ) + else: + adjusted_padding = adjusted_padding * 2 + output = g.op( + "AveragePool", + input, + kernel_shape_i=tuple_fn(kernel_size), + strides_i=tuple_fn(stride), + pads_i=adjusted_padding, + ) + return output + + return symbolic_fn + + +@_onnx_symbolic( + "aten::adaptive_avg_pool1d", + decorate=[ + _apply_params( + "adaptive_avg_pool1d", "AveragePool", torch.nn.modules.utils._single + ), + _export("adaptive_avg_pool1d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_avg_pool2d", + decorate=[ + _apply_params( + "adaptive_avg_pool2d", "AveragePool", torch.nn.modules.utils._pair + ), + _export("adaptive_avg_pool2d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_avg_pool3d", + decorate=[ + _apply_params( + "adaptive_avg_pool3d", "AveragePool", torch.nn.modules.utils._triple + ), + _export("adaptive_avg_pool3d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_max_pool1d", + decorate=[ + _apply_params( + "adaptive_max_pool1d", + "MaxPool", + torch.nn.modules.utils._single, + max_pool1d_with_indices, + ), + _export("adaptive_max_pool1d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_max_pool2d", + decorate=[ + _apply_params( + "adaptive_max_pool2d", + "MaxPool", + torch.nn.modules.utils._pair, + max_pool2d_with_indices, + ), + _export("adaptive_max_pool2d"), + ], +) +@_onnx_symbolic( + "aten::adaptive_max_pool3d", + decorate=[ + _apply_params( + "adaptive_max_pool3d", + "MaxPool", + torch.nn.modules.utils._triple, + max_pool3d_with_indices, + ), + _export("adaptive_max_pool3d"), + ], +) +@_beartype.beartype +def _adaptive_pool(name, type, tuple_fn, fn=None): + @symbolic_helper.quantized_args(True, False) + @_beartype.beartype + def symbolic_fn(g, input, output_size): + # _adaptive_pool is supported for cases where output_size is 1 for all dimensions, + # by executing a GlobalPool. + # It is also supported for cases where the output size is a factor of the input size. + # For these cases the stride and kernel size are uniform along all the indices of + # the same dimension, which makes it possible to export it to ONNX. 
+ # for MaxPool, GlobalMaxPool does not return indices, + # so we try using max_poolxd_with_indices, and if it is not possible + # (input is not a complete tensor or output size not factor of input size) + # then we call GlobalAveragePool and return None for the indices + output_size_value = output_size + try: + output_size = symbolic_helper._parse_arg(output_size, "is") + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + return symbolic_helper._onnx_unsupported( + "adaptive pooling, since output_size is not constant.", input + ) + if output_size == [1] * len(output_size) and type == "AveragePool": + return g.op("GlobalAveragePool", input) + sizes = symbolic_helper._get_tensor_sizes(input) + try: + dim = sizes[2:] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + dim = None + if dim is None or any(i is None for i in dim): + if output_size == [1] * len(output_size): + return g.op("GlobalMaxPool", input), None + return symbolic_helper._unimplemented( + name, "input size not accessible", input + ) + # verify if output size % input size = 0 for all dim + mod = [dim[i] % output_size[i] for i in range(0, len(dim))] + if mod != [0] * len(mod): + if output_size == [1] * len(output_size): + return g.op("GlobalMaxPool", input), None + return symbolic_helper._unimplemented( + name, "output size that are not factor of input size", output_size_value + ) + k = [int(dim[i] / output_size[i]) for i in range(0, len(dim))] + # call max_poolxd_with_indices to get indices in the output + if type == "MaxPool": + return fn(g, input, k, k, (0,) * len(dim), (1,) * len(dim), False) + output = g.op(type, input, kernel_shape_i=tuple_fn(k), strides_i=tuple_fn(k)) + return output + + return symbolic_fn + + +@_beartype.beartype +def _prepare_onnx_paddings(dim: int, pad): + """Generate paddings in ONNX order based on pad in pytorch. + Args: + dim: the dimension of the tensor. + pad: the paddings in pytorch. + The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ... + """ + # The desired order of paddings is + # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end. + # n is the dimension of input. + # assume zero-dimensions in the beginning + paddings = list(pad[:]) + [0] * (dim * 2 - len(pad)) + # reverse order and collate first beginnings and then ends + paddings = paddings[-2::-2] + paddings[-1::-2] + return paddings + + +@_beartype.beartype +def _convert_padding_node(input): + padding = symbolic_helper._maybe_get_const(input, "is") + if symbolic_helper._is_value(padding) and symbolic_helper._is_packed_list(padding): + input_list = symbolic_helper._unpack_list(padding) + try: + padding = [ + symbolic_helper._get_const(v, "i", "padding") for v in input_list + ] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + return symbolic_helper._onnx_opset_unsupported_detailed( + "Pad", 9, 11, "The sizes of the padding must be constant", input + ) + return padding + + +@_onnx_symbolic("aten::constant_pad_nd") +@_beartype.beartype +def constant_pad_nd(g: jit_utils.GraphContext, input, padding, value): + mode = "constant" + try: + value = symbolic_helper._get_const(value, "f", "value") + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. 
+ return symbolic_helper._onnx_opset_unsupported_detailed( + "Pad", 9, 11, "The value for the padding must be constant", value + ) + + padding = _convert_padding_node(padding) + paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding) + return _op_with_optional_float_cast( + g, "Pad", input, pads_i=paddings, mode_s=mode, value_f=value, opset_before=11 + ) + + +@_beartype.beartype +def _pad_circular(g: jit_utils.GraphContext, input: _C.Value, pad: _C.Value): + padding = _convert_padding_node(pad) + assert len(padding) % 2 == 0 + ndim = len(padding) // 2 + + cur = input + for idx in range(ndim): + pad_r = padding[-(2 * idx + 1)] + pad_l = padding[-(2 * idx + 2)] + tensors = [] + if pad_l > 0: + left = symbolic_helper._slice_helper( + g, cur, axes=[2 + idx], starts=[-(pad_l)], ends=[_constants.INT64_MAX] + ) + tensors.append(left) + + if pad_l < 0 or pad_r < 0: + start = builtins.max(0, -pad_l) + end = -(builtins.max(0, -pad_r)) + middle = symbolic_helper._slice_helper( + g, + cur, + axes=[2 + idx], + starts=[start], + ends=[end], + ) + tensors.append(middle) + else: + tensors.append(cur) + + if pad_r > 0: + right = symbolic_helper._slice_helper( + g, cur, axes=[2 + idx], starts=[0], ends=[pad_r] + ) + tensors.append(right) + + cur = g.op("Concat", *tensors, axis_i=(2 + idx)) + + return cur + + +@_onnx_symbolic("aten::reflection_pad1d") +@_onnx_symbolic("aten::reflection_pad2d") +@_onnx_symbolic("aten::reflection_pad3d") +@_beartype.beartype +def reflection_pad(g: jit_utils.GraphContext, input, padding): + mode = "reflect" + padding = _convert_padding_node(padding) + paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding) + return _op_with_optional_float_cast( + g, "Pad", input, pads_i=paddings, mode_s=mode, opset_before=11 + ) + + +@_onnx_symbolic("aten::replication_pad1d") +@_onnx_symbolic("aten::replication_pad2d") +@_onnx_symbolic("aten::replication_pad3d") +@_beartype.beartype +def replication_pad(g: jit_utils.GraphContext, input, padding): + mode = "edge" + padding = _convert_padding_node(padding) + paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding) + return _op_with_optional_float_cast( + g, "Pad", input, pads_i=paddings, mode_s=mode, opset_before=11 + ) + + +@_onnx_symbolic("aten::pad") +@_beartype.beartype +def pad( + g: jit_utils.GraphContext, + input: _C.Value, + pad: _C.Value, + mode: _C.Value, + value: _C.Value, +): + mode = symbolic_helper._parse_arg(mode, "s") + if mode == "replicate": + return replication_pad(g, input, pad) + elif mode == "reflect": + return reflection_pad(g, input, pad) + elif mode == "constant": + return constant_pad_nd(g, input, pad, value) + elif mode == "circular": + return _pad_circular(g, input, pad) + else: + raise errors.SymbolicValueError(f"Unrecognized padding mode {mode}", input) + + +@_onnx_symbolic( + "aten::upsample_nearest1d", + decorate=[ + _apply_params("upsample_nearest1d", 3, "nearest"), + _export("upsample_nearest1d"), + ], +) +@_onnx_symbolic( + "aten::upsample_nearest2d", + decorate=[ + _apply_params("upsample_nearest2d", 4, "nearest"), + _export("upsample_nearest2d"), + ], +) +@_onnx_symbolic( + "aten::upsample_nearest3d", + decorate=[ + _apply_params("upsample_nearest3d", 5, "nearest"), + _export("upsample_nearest3d"), + ], +) +@_onnx_symbolic( + "aten::upsample_linear1d", + decorate=[ + _apply_params("upsample_linear1d", 3, "linear"), + _export("upsample_linear1d"), + ], +) +@_onnx_symbolic( + "aten::upsample_bilinear2d", + decorate=[ + 
_apply_params("upsample_bilinear2d", 4, "linear"), + _export("upsample_bilinear2d"), + ], +) +@_onnx_symbolic( + "aten::upsample_trilinear3d", + decorate=[ + _apply_params("upsample_trilinear3d", 5, "linear"), + _export("upsample_trilinear3d"), + ], +) +@_beartype.beartype +def _interpolate(name: str, dim: int, interpolate_mode: str): + def symbolic_fn(g, input, output_size, *args): + scales, align_corners = symbolic_helper._get_interpolate_attributes( + g, interpolate_mode, args + ) + symbolic_helper._interpolate_warning(interpolate_mode) + align_corners = symbolic_helper._maybe_get_scalar(align_corners) + if align_corners: + return symbolic_helper._unimplemented(name, "align_corners == True", input) + if scales is None: + scales = symbolic_helper._interpolate_size_to_scales( + g, input, output_size, dim + ) + return g.op("Upsample", input, scales, mode_s=interpolate_mode) + + return symbolic_fn + + +@_onnx_symbolic("aten::__interpolate") +@_beartype.beartype +def __interpolate( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, + antialias, +): + scales, mode = symbolic_helper._interpolate_get_scales_and_mode( + g, input, size, scale_factor, mode, align_corners + ) + return g.op("Upsample", input, scales, mode_s=mode) + + +@_onnx_symbolic("aten::bitwise_not") +@_beartype.beartype +def bitwise_not(g: jit_utils.GraphContext, input): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise Not " + "for non-boolean input values", + input, + ) + return g.op("Not", input) + + +@_onnx_symbolic("aten::bitwise_or") +@_beartype.beartype +def bitwise_or(g, self, other): + if not symbolic_helper._is_bool(self): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values. self: ", + self, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values. other: ", + other, + ) + return g.op("Or", self, other) + + +@_beartype.beartype +def wrap_logical_op_with_cast_to(to_type): + def decorator(fn): + @functools.wraps(fn) + def wrap_with_cast(g, input, other): + to_cast_func = globals()[f"_cast_{to_type}"] + return fn(g, to_cast_func(g, input, False), to_cast_func(g, other, False)) + + return wrap_with_cast + + return decorator + + +@_beartype.beartype +def wrap_logical_op_with_negation(func: Callable) -> Callable: + @functools.wraps(func) + def wrap_with_not(g, input, other): + return g.op("Not", func(g, input, other)) + + return wrap_with_not + + +@_onnx_symbolic("aten::__not_") +@_beartype.beartype +def __not_(g: jit_utils.GraphContext, self): + if not symbolic_helper._is_bool(self): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise Not " + "for non-boolean input values", + self, + ) + return g.op("Not", self) + + +@_onnx_symbolic("aten::eq") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def eq(g: jit_utils.GraphContext, self, other): + if isinstance(self.type(), _C.DeviceObjType) and isinstance( + other.type(), _C.DeviceObjType + ): + # ONNX doesn't have devices, so consider them all to be equal. + # The no-op check for equality will get constant-folded. 
+ return g.op("Constant", value_t=torch.tensor(True, dtype=torch.bool)) + self_node = self.node() + other_node = other.node() + if self_node.kind() == other_node.kind() == "onnx::Constant": + if self_node.kindOf("value") == other_node.kindOf("value") == "s": + # Exporting strings to ONNX is not supported. + # If both strings are constant, we can compare them directly. + # The no-op check for equality will get constant-folded. + return g.op( + "Constant", + value_t=torch.tensor( + self_node.s("value") == other_node.s("value"), + dtype=torch.bool, + ), + ) + + return g.op("Equal", self, other) + + +@_onnx_symbolic("aten::ne") +@symbolic_helper.quantized_args(True, True) +@wrap_logical_op_with_negation +@_beartype.beartype +def ne(g: jit_utils.GraphContext, self, other): + return eq(g, self, other) + + +@_onnx_symbolic("aten::gt") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def gt(g: jit_utils.GraphContext, input, other): + return _gt_impl(g, input, other) + + +@_beartype.beartype +def _gt_impl(g: jit_utils.GraphContext, input, other): + if symbolic_helper._is_bool(input) and symbolic_helper._is_bool(other): + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32) + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.INT32) + return g.op("Greater", input, other) + + +@_onnx_symbolic("aten::lt") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def lt(g: jit_utils.GraphContext, input, other): + return _lt_impl(g, input, other) + + +@_beartype.beartype +def _lt_impl(g: jit_utils.GraphContext, input, other): + if symbolic_helper._is_bool(input) and symbolic_helper._is_bool(other): + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32) + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.INT32) + return g.op("Less", input, other) + + +@_onnx_symbolic("aten::ge") +@symbolic_helper.quantized_args(True, True) +@wrap_logical_op_with_negation +@_beartype.beartype +def ge(g: jit_utils.GraphContext, input, other): + return _lt_impl(g, input, other) + + +@_onnx_symbolic("aten::le") +@symbolic_helper.quantized_args(True, True) +@wrap_logical_op_with_negation +@_beartype.beartype +def le(g: jit_utils.GraphContext, input, other): + return _gt_impl(g, input, other) + + +@_onnx_symbolic("aten::__and_") +@_beartype.beartype +def __and_(g: jit_utils.GraphContext, input, other): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise AND " + "for non-boolean input values", + input, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise AND " + "for non-boolean input values", + other, + ) + return g.op("And", input, other) + + +@_onnx_symbolic("aten::__or_") +@_beartype.beartype +def __or_(g: jit_utils.GraphContext, input, other): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values", + input, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values", + other, + ) + return g.op("Or", input, other) + + +@_onnx_symbolic("aten::__xor_") +@_beartype.beartype +def __xor_(g: jit_utils.GraphContext, input, other): + if not symbolic_helper._is_bool(input): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise XOR " + "for non-boolean input 
values", + input, + ) + if not symbolic_helper._is_bool(other): + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting bitwise XOR " + "for non-boolean input values", + other, + ) + return g.op("Xor", input, other) + + +@_onnx_symbolic("aten::logical_and") +@wrap_logical_op_with_cast_to("Bool") +@_beartype.beartype +def logical_and(g: jit_utils.GraphContext, input, other): + return g.op("And", input, other) + + +@_onnx_symbolic("aten::logical_or") +@wrap_logical_op_with_cast_to("Bool") +@_beartype.beartype +def logical_or(g: jit_utils.GraphContext, input, other): + return g.op("Or", input, other) + + +@_onnx_symbolic("aten::logical_xor") +@wrap_logical_op_with_cast_to("Bool") +@_beartype.beartype +def logical_xor(g: jit_utils.GraphContext, input, other): + return g.op("Xor", input, other) + + +@_onnx_symbolic("aten::logical_not") +@_beartype.beartype +def logical_not(g: jit_utils.GraphContext, input): + return g.op("Not", g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.BOOL)) + + +@_onnx_symbolic("aten::__rshift_") +@_beartype.beartype +def __rshift_(g: jit_utils.GraphContext, self, other): + # make sure to cast other to self's type + # (when self is long, make sure that other is not float) + self_scalar_type = _type_utils.JitScalarType.from_value(self) + if ( + _type_utils.JitScalarType.from_value(other, _type_utils.JitScalarType.UNDEFINED) + != self_scalar_type + ): + other = g.op( + "Cast", + other, + to_i=self_scalar_type.onnx_type(), + ) + + two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32)) + # exponent (same type as self) has to be float or double in onnx::Pow + if not symbolic_helper._is_fp(self): + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT) + two_pow = g.op("Pow", two, other) + two_pow = g.op( + "Cast", + two_pow, + to_i=self_scalar_type.onnx_type(), + ) + rshift = g.op("Div", self, two_pow) + return rshift + + +@_onnx_symbolic("aten::__lshift_") +@_beartype.beartype +def __lshift_(g: jit_utils.GraphContext, self, other): + # make sure to cast other to self's type + # (when self is long, make sure that other is not float) + self_scalar_type = _type_utils.JitScalarType.from_value(self) + if ( + _type_utils.JitScalarType.from_value(other, _type_utils.JitScalarType.UNDEFINED) + != self_scalar_type + ): + other = g.op( + "Cast", + other, + to_i=self_scalar_type.onnx_type(), + ) + + two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32)) + # exponent (same type as self) has to be float or double in onnx::Pow + if not symbolic_helper._is_fp(self): + other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT) + two_pow = g.op("Pow", two, other) + two_pow = g.op( + "Cast", + two_pow, + to_i=self_scalar_type.onnx_type(), + ) + lshift = g.op("Mul", self, two_pow) + return lshift + + +@_onnx_symbolic("aten::where") +@symbolic_helper.parse_args("v", "v", "v", "i") +@_beartype.beartype +def where(g: jit_utils.GraphContext, condition, self=None, other=None, _outputs=None): + # Assumes that torch.where's first argument takes only Bool and Byte tensors. 
+ if not symbolic_helper._is_bool(condition): + condition = g.op("Cast", condition, to_i=_C_onnx.TensorProtoDataType.BOOL) + if self is None: + condition = nonzero(g, condition) + return symbolic_helper._unbind_helper( + g, condition, g.op("Constant", value_t=torch.tensor(1)), _outputs + ) + return g.op("Where", condition, self, other) + + +@_onnx_symbolic("aten::log_softmax") +@symbolic_helper.parse_args("v", "i", "none") +@_beartype.beartype +def log_softmax(g: jit_utils.GraphContext, input, dim, dtype=None): + # PyTorch dim and ONNX axis have different meanings. + # See Softmax comment for details. + # TODO: remove this as onnx opset 11 spec allows negative axes + input_dim = symbolic_helper._get_tensor_rank(input) + if input_dim is None: + return symbolic_helper._unimplemented( + "dim", + "ONNX and PyTorch use different strategies to split the input. " + "Input rank must be known at export time.", + ) + if dim < 0: + dim = input_dim + dim + is_transpose_required = input_dim != dim + 1 + # ONNX only supports log_softmax with dim = -1. Transpose must be added before and after log_softmax to support other cases. + if is_transpose_required: + axes = list(range(input_dim)) + axes[dim], axes[-1] = axes[-1], axes[dim] + input = g.op("Transpose", input, perm_i=axes) + dim = input_dim - 1 + return_op = g.op("LogSoftmax", input, axis_i=dim) + if dtype and dtype.node().kind() != "prim::Constant": + parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype") + return_op = g.op( + "Cast", return_op, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type() + ) + if is_transpose_required: + return_op = g.op("Transpose", return_op, perm_i=axes) # type: ignore[possibly-undefined] + return return_op + + +@_onnx_symbolic("aten::_log_softmax") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def _log_softmax(g: jit_utils.GraphContext, input, dim, half_to_float): + if ( + half_to_float + and _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.UNDEFINED + ) + == _type_utils.JitScalarType.HALF + ): + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return log_softmax(g, input, dim) + + +@_onnx_symbolic("aten::_convolution") +@symbolic_helper.parse_args( + "v", "v", "v", "is", "is", "is", "i", "is", "i", "i", "i", "i", "i" +) +@_beartype.beartype +def _convolution( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + benchmark, + deterministic, + cudnn_enabled, + allow_tf32=None, +): + weight_size = symbolic_helper._get_tensor_sizes(weight) + try: + kernel_shape = weight_size[2:] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + kernel_shape = None + + if kernel_shape is None or any(i is None for i in kernel_shape): + raise errors.SymbolicValueError( + "Unsupported: ONNX export of convolution for kernel of unknown shape.", + input, + ) + + args = [input, weight] + # ONNX only supports 1D bias + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) == 1 + ): + args.append(bias) + + kwargs = { + "kernel_shape_i": weight_size[2:], + "strides_i": stride, + # NB: ONNX supports asymmetric padding, whereas PyTorch supports only + # symmetric padding + "pads_i": padding + padding, + "dilations_i": dilation, + "group_i": groups, + } + + if any(o != 0 for o in output_padding): + # ONNX supports both output_shape and output_padding. they are equivalent expressive. 
+ # output_padding is more straightforward, so we use it here. + # output_shape = stride * (input_shape - 1) + output_padding + kernel_shape - padding * 2 + assert transposed + assert len(stride) == len(output_padding) + kwargs["output_padding_i"] = output_padding + + n = g.op("ConvTranspose" if transposed else "Conv", *args, **kwargs) + + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) != 1 + ): + return g.op("Add", n, bias) + else: + return n + + +@_onnx_symbolic("aten::_convolution_mode") +@symbolic_helper.parse_args( + "v", + "v", + "v", + "is", + "s", + "is", + "i", +) +@_beartype.beartype +def _convolution_mode( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + groups, +): + weight_size = symbolic_helper._get_tensor_sizes(weight) + try: + kernel_shape = weight_size[2:] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + kernel_shape = None + + if kernel_shape is None or any(i is None for i in kernel_shape): + raise errors.SymbolicValueError( + "Unsupported: ONNX export of convolution for kernel of unknown shape.", + input, + ) + + args = [input, weight] + # ONNX only supports 1D bias + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) == 1 + ): + args.append(bias) + + if padding == "valid": + padding = "VALID" + elif padding == "same": + padding = "SAME_UPPER" + kwargs = { + "kernel_shape_i": weight_size[2:], + "strides_i": stride, + "auto_pad_s": padding, + "dilations_i": dilation, + "group_i": groups, + } + + n = g.op("Conv", *args, **kwargs) + + if ( + not symbolic_helper._is_none(bias) + and symbolic_helper._get_tensor_rank(bias) != 1 + ): + return g.op("Add", n, bias) + else: + return n + + +@_onnx_symbolic("aten::convolution") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is", "i") +@_beartype.beartype +def convolution( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv1d") +@symbolic_helper.parse_args("v", "v", "v", "is", "v", "is", "i") +@_beartype.beartype +def conv1d( + g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups +): + str_padding = symbolic_helper._parse_arg(padding, "s") + if str_padding in ["valid", "same"]: + return _convolution_mode( + g, + input, + weight, + bias, + stride, + str_padding, + dilation, + groups, + ) + else: + padding = symbolic_helper._parse_arg(padding, "is") + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + False, + (), + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv2d") +@symbolic_helper.parse_args("v", "v", "v", "is", "v", "is", "i") +@_beartype.beartype +def conv2d( + g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups +): + str_padding = symbolic_helper._parse_arg(padding, "s") + if str_padding in ["valid", "same"]: + return _convolution_mode( + g, + input, + weight, + bias, + stride, + str_padding, + dilation, + groups, + ) + else: + padding = symbolic_helper._parse_arg(padding, "is") + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + False, + (), + groups, + None, + None, + None, + None, + ) + 
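+# Shared dispatch for the convNd symbolics: string padding ("same"/"valid")
+# routes to _convolution_mode (ONNX auto_pad), while an explicit int list
+# routes to _convolution (explicit pads). A minimal sketch of how this path
+# is reached (model and file name are illustrative only):
+#
+#   model = torch.nn.Conv2d(3, 8, kernel_size=3, padding="same")
+#   torch.onnx.export(model, torch.randn(1, 3, 32, 32), "conv_same.onnx")
+#
+# which would typically lower aten::conv2d via _convolution_mode with
+# auto_pad_s="SAME_UPPER".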
+ +@_onnx_symbolic("aten::conv3d") +@symbolic_helper.parse_args("v", "v", "v", "is", "v", "is", "i") +@_beartype.beartype +def conv3d( + g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups +): + str_padding = symbolic_helper._parse_arg(padding, "s") + if str_padding in ["valid", "same"]: + return _convolution_mode( + g, + input, + weight, + bias, + stride, + str_padding, + dilation, + groups, + ) + else: + padding = symbolic_helper._parse_arg(padding, "is") + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + False, + (), + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv_transpose1d") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is") +@_beartype.beartype +def conv_transpose1d( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + output_padding, + groups, + dilation, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + True, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv_transpose2d") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is") +@_beartype.beartype +def conv_transpose2d( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + output_padding, + groups, + dilation, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + True, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::conv_transpose3d") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is") +@_beartype.beartype +def conv_transpose3d( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + output_padding, + groups, + dilation, +): + return _convolution( + g, + input, + weight, + bias, + stride, + padding, + dilation, + True, + output_padding, + groups, + None, + None, + None, + None, + ) + + +@_onnx_symbolic("aten::batch_norm") +@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i") +@_beartype.beartype +def batch_norm( + g: jit_utils.GraphContext, + input, + weight, + bias, + running_mean, + running_var, + training, + momentum, + eps, + cudnn_enabled, +): + symbolic_helper.check_training_mode(training, "batch_norm") + + if ( + torch.is_autocast_enabled() + and not symbolic_helper.args_have_same_dtype( + [input, weight, bias, running_mean, running_var] + ) + and GLOBALS.export_onnx_opset_version < 15 + ): + return symbolic_helper._onnx_opset_unsupported_detailed( + "BatchNormalization", + 9, + 15, + "All input tensors must have the same `dtype`." 
+ " Turn off Autocast or export using opset version 15.", + input, + ) + + weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper( + g, input, weight, bias, running_mean, running_var + ) + out = g.op( + "BatchNormalization", + input, + weight, + bias, + running_mean, + running_var, + epsilon_f=eps, + momentum_f=1 - momentum, + outputs=1 if not training else 5, + ) + if not training: + return out + else: + res, new_running_mean, new_running_var, saved_mean, saved_var = out + new_running_mean.setType(running_mean.type()) + new_running_var.setType(running_var.type()) + saved_mean.setDebugName("batch_norm_dead_output-" + saved_mean.debugName()) + saved_var.setDebugName("batch_norm_dead_output-" + saved_var.debugName()) + return res + + +@_onnx_symbolic("aten::native_layer_norm") +@symbolic_helper.quantized_args(True, False, False, False) +@symbolic_helper.parse_args("v", "is", "v", "v", "f") +@_beartype.beartype +def native_layer_norm( + g: jit_utils.GraphContext, + input: _C.Value, + normalized_shape: Sequence[int], + weight: _C.Value, + bias: _C.Value, + eps: float, +) -> Tuple[_C.Value, _C.Value, _C.Value]: + axes = [-i for i in range(len(normalized_shape), 0, -1)] + + two_cst = symbolic_helper._generate_wrapped_number(g, 2.0) + eps_cst = symbolic_helper._generate_wrapped_number(g, eps) + + mean = g.op("ReduceMean", input, axes_i=axes) + numerator = sub(g, input, mean) + + # Cast it to eps dtype to avoid precision loss + is_type_half = ( + _type_utils.JitScalarType.from_value(numerator) + == _type_utils.JitScalarType.HALF + ) + if is_type_half: + eps_dtype = _type_utils.JitScalarType.from_value(eps_cst) + numerator = g.op( + "Cast", numerator, to_i=_type_utils.JitScalarType(eps_dtype).onnx_type() + ) + + # variance = e((x - e(x))^2), and (x - e(x)) is the numerator in the layer_norm formula + variance = g.op("ReduceMean", pow(g, numerator, two_cst), axes_i=axes) + denominator = sqrt(g, g.op("Add", variance, eps_cst)) + normalized = g.op("Div", numerator, denominator) + + # Cast back to input type as eps related ops are all done + if is_type_half: + input_dtype = _type_utils.JitScalarType.from_value(input) + normalized = g.op( + "Cast", normalized, to_i=_type_utils.JitScalarType(input_dtype).onnx_type() + ) + + if not (weight is None or symbolic_helper._is_none(weight)): + normalized = mul(g, normalized, weight) + if not (bias is None or symbolic_helper._is_none(bias)): + normalized = add(g, normalized, bias) + + # rdenominator := 1 / sqrt(variance + eps) + # According to aten::native_layer_norm, rdenominator should have the same dtype as input, + # mean and normalized, so we need to Cast it back + if is_type_half: + denominator = g.op( + "Cast", denominator, to_i=_type_utils.JitScalarType(input_dtype).onnx_type() # type: ignore[possibly-undefined] + ) + rdenominator = g.op("Reciprocal", denominator) + else: + rdenominator = reciprocal(g, denominator) + + return normalized, mean, rdenominator + + +@_onnx_symbolic("aten::layer_norm") +@symbolic_helper.quantized_args(True, False, False, False) +@symbolic_helper.parse_args("v", "is", "v", "v", "f", "b") +@_beartype.beartype +def layer_norm( + g: jit_utils.GraphContext, + input: _C.Value, + normalized_shape: Sequence[int], + weight: _C.Value, + bias: _C.Value, + eps: float, + cudnn_enable: bool, +) -> _C.Value: + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "layer_norm", + input, + weight, + bias, + normalized_shape_i=normalized_shape, + eps_f=eps, + cudnn_enable_i=cudnn_enable, + ) + normalized, _, _ = 
native_layer_norm(g, input, normalized_shape, weight, bias, eps) + return normalized + + +@_onnx_symbolic("aten::instance_norm") +@symbolic_helper.parse_args("v", "v", "v", "v", "v", "b", "f", "f", "b") +@_beartype.beartype +def instance_norm( + g: jit_utils.GraphContext, + input, + weight, + bias, + running_mean, + running_var, + use_input_stats: bool, + momentum: Number, + eps: Number, + cudnn_enabled: bool, +): + symbolic_helper.check_training_mode(use_input_stats, "instance_norm") + channel_size = symbolic_helper._get_tensor_dim_size(input, 1) + if weight is None or symbolic_helper._is_none(weight): + if channel_size is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of instance_norm for unknown channel size.", + input, + ) + weight_value = torch.tensor( + [1.0] * channel_size, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ) + weight = g.op("Constant", value_t=weight_value) + if bias is None or symbolic_helper._is_none(bias): + if channel_size is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of instance_norm for unknown channel size.", + input, + ) + bias_value = torch.tensor( + [0.0] * channel_size, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ) + bias = g.op("Constant", value_t=bias_value) + if ( + running_mean is None + or symbolic_helper._is_none(running_mean) + or running_var is None + or symbolic_helper._is_none(running_var) + ): + return g.op("InstanceNormalization", input, weight, bias, epsilon_f=eps) + else: + input_size = symbolic_helper._get_tensor_sizes(input) + # If input shape is [N, C, H, W], reshape to [1, N * C, H, W] and call batch_norm. + # For more information instance_norm(): + # https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Normalization.cpp#L542 + input_size_reshape = input_size.copy() + n = input_size[0] + if n is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of instance_norm training for unknown " + "batch size.", + input, + ) + c = input_size[1] + input_size_reshape[0] = 1 + input_size_reshape[1] = n * c + weight_ = repeat( + g, weight, g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)) + ) + bias_ = repeat( + g, bias, g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)) + ) + running_mean_ = repeat( + g, + running_mean, + g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)), + ) + running_var_ = repeat( + g, + running_var, + g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)), + ) + input_reshaped = g.op( + "Reshape", + input, + g.op("Constant", value_t=torch.LongTensor(input_size_reshape)), + ) + out = batch_norm( + g, + input_reshaped, + weight_, + bias_, + running_mean_, + running_var_, + use_input_stats, + momentum, + eps, + cudnn_enabled, + ) + return view(g, out, g.op("Constant", value_t=torch.tensor(input_size))) + + +@_onnx_symbolic("aten::unfold") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def unfold(g: jit_utils.GraphContext, input, dimension, size, step): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("unfold", input, dimension_i=dimension, size_i=size, step_i=step) + sizes = symbolic_helper._get_tensor_sizes(input) + # FIXME(justinchuby): Get rid of the try catch here to improve readability + try: + sizedim = sizes[dimension] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. 
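+ # _get_tensor_sizes may return None when the input shape is unknown at
+ # export time; fall back to None so the branch below reports
+ # "input size not accessible".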
+ sizedim = None + if sizedim is not None: + low_indices = range(0, sizedim, step) + hi_indices = range(size, sizedim + 1, step) + stack = [ + symbolic_helper._slice_helper( + g, input, axes=[dimension], starts=[low], ends=[hi] + ) + for low, hi in zip(low_indices, hi_indices) + ] + ndim = len(sizes) + perm = list(range(0, ndim)) + perm.append(perm.pop(dimension)) + unsqueeze = [ + symbolic_helper._unsqueeze_helper( + g, g.op("Transpose", t, perm_i=perm), [dimension] + ) + for t in stack + ] + return g.op("Concat", *unsqueeze, axis_i=dimension) + else: + return symbolic_helper._unimplemented( + "Unfold", "input size not accessible", input + ) + + +@_onnx_symbolic("aten::elu") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "t", "t", "t") +@_beartype.beartype +def elu(g: jit_utils.GraphContext, input, alpha, scale, input_scale): + if scale and scale != 1.0: + return symbolic_helper._unimplemented( + "scale", "does not support scale in Elu", scale + ) + if input_scale and input_scale != 1.0: + return symbolic_helper._unimplemented( + "input_scale", "does not support input_scale in Elu", input_scale + ) + # See Note [Export inplace] + return g.op("Elu", input, alpha_f=symbolic_helper._scalar(alpha)) + + +@_onnx_symbolic("aten::selu") +@symbolic_helper.quantized_args(True) +@_beartype.beartype +def selu(g: jit_utils.GraphContext, input): + return g.op("Selu", input) + + +@_onnx_symbolic("aten::index_select") +@symbolic_helper.parse_args("v", "i", "v") +@_beartype.beartype +def index_select(g: jit_utils.GraphContext, self, dim, index): + # In case of a scalar index, index_select returns a tensor with the same rank as the input. + # To match this behavior in ONNX, we make index a 1D tensor so that the following gather + # also produces a tensor with the same rank as the input. 
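+ # e.g. (illustrative shapes): a (3, 4) input with a 0-d index tensor(2) on
+ # dim=0 yields a (1, 4) result, matching torch.index_select's rank behavior.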
+ return symbolic_helper._select_helper(g, self, dim, index) + + +@_onnx_symbolic("aten::index_put") +@_beartype.beartype +def index_put(g: jit_utils.GraphContext, self, indices_list_value, values, accumulate): + if symbolic_helper._is_packed_list(indices_list_value): + indices_list = symbolic_helper._unpack_list(indices_list_value) + else: + indices_list = [indices_list_value] + if symbolic_helper.is_caffe2_aten_fallback(): + args = [self] + indices_list + [values, accumulate] + return g.at("index_put", *args) + + accumulate = symbolic_helper._parse_arg(accumulate, "b") + + if len(indices_list) == 0: + if accumulate: + return add(g, self, values) + return values + symbolic_helper._onnx_opset_unsupported("index_put", 9, 11, self) + + +@_onnx_symbolic("aten::index_fill") +@_beartype.beartype +def index_fill(g: jit_utils.GraphContext, self, dim, index, value): + dim_value = symbolic_helper._parse_arg(dim, "i") + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "index_fill", + self, + index, + value, + overload_name="int_Scalar", + dim_i=dim_value, + ) + + expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper( + g, self, dim, index + ) + value = symbolic_helper._maybe_get_scalar(value) + value = symbolic_helper._if_scalar_type_as(value, self) + expanded_value = expand(g, value, expanded_index_shape, None) + + return scatter(g, self, dim, expanded_index, expanded_value) + + +@_onnx_symbolic("aten::index_copy") +@_beartype.beartype +def index_copy(g: jit_utils.GraphContext, self, dim, index, source): + dim_value = symbolic_helper._parse_arg(dim, "i") + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("index_copy", self, index, source, dim_i=dim_value) + expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper( + g, self, dim, index + ) + return scatter(g, self, dim, expanded_index, source) + + +@_onnx_symbolic("aten::bucketize") +@symbolic_helper.parse_args("v", "v", "b", "b") +@_beartype.beartype +def bucketize( + g: jit_utils.GraphContext, self, boundaries, out_int32=False, right=False +): + out_type = _C_onnx.TensorProtoDataType.INT64 + if out_int32: + out_type = _C_onnx.TensorProtoDataType.INT32 + # A tensor expanded_boundaries is created such that it + # contains a copy of boundaries for each element of self. + new_shape = g.op("Concat", g.op("Shape", boundaries), g.op("Shape", self), axis_i=0) + # Unsqueeze step is performed to respect ONNX's numpy style broadcasting for comparison ops + # https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md + tensor_rank = symbolic_helper._get_tensor_rank(self) + assert tensor_rank is not None + unsqueeze_axes = list(range(1, tensor_rank + 1)) + expanded_boundaries = expand( + g, + symbolic_helper._unsqueeze_helper(g, boundaries, unsqueeze_axes), + new_shape, + None, + ) + # Compare each element of self to boundaries to get a tensor + # with leading 1s and trailing 0s. + # e.g., 4 > [1, 3, 4] = [1, 1, 0] + # The index of the last 1 is the bucket where the element should go. + if right: + cond = ge(g, self, expanded_boundaries) + else: + cond = gt(g, self, expanded_boundaries) + cond_out = g.op("Cast", cond, to_i=out_type) + # Sum to get the number of 1s corresponding to each element, + # which is the same as the bucket index. 
+ # e.g., sum(4 > [1, 3, 4]) = sum([1, 1, 0]) = 2 + return symbolic_helper._reducesum_helper(g, cond_out, axes_i=[0], keepdims_i=0) + + +@_onnx_symbolic("aten::type_as") +@_beartype.beartype +def type_as(g: jit_utils.GraphContext, self, other): + self_dtype = symbolic_helper._try_get_scalar_type(self) + other_dtype = symbolic_helper._try_get_scalar_type(other) + if self_dtype == other_dtype and self_dtype is not None: + return self + if other_dtype is not None: + return g.op( + "Cast", + self, + to_i=other_dtype.onnx_type(), + ) + + if symbolic_helper.is_caffe2_aten_fallback(): + # We don't know the type of other, bail by emitting ATen + return g.at("type_as", self, other) + + raise errors.SymbolicValueError( + "Unsupported: ONNX export of type_as for tensor " + "of unknown dtype. Please check if the dtype of the " + "parameter passed to the type_as function is correct.", + other, + ) + + +@_onnx_symbolic("aten::cosine_similarity") +@symbolic_helper.parse_args("v", "v", "i", "f") +@_beartype.beartype +def cosine_similarity(g: jit_utils.GraphContext, x1, x2, dim, eps): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("cosine_similarity", x1, x2, dim_i=dim, eps_f=eps) + cross = symbolic_helper._reducesum_helper( + g, mul(g, x1, x2), axes_i=[dim], keepdims_i=0 + ) + x1_l2 = symbolic_helper._reducesum_helper( + g, mul(g, x1, x1), axes_i=[dim], keepdims_i=0 + ) + x2_l2 = symbolic_helper._reducesum_helper( + g, mul(g, x2, x2), axes_i=[dim], keepdims_i=0 + ) + div_tens = max( + g, sqrt(g, mul(g, x1_l2, x2_l2)), g.op("Constant", value_t=torch.tensor([eps])) + ) + return div(g, cross, div_tens) + + +@_onnx_symbolic("aten::pairwise_distance") +@_beartype.beartype +def pairwise_distance(g: jit_utils.GraphContext, input1, input2, p, eps, keepdim): + if not symbolic_helper._is_value(eps): + eps = g.op("Constant", value_t=torch.tensor([eps])) + inv_p = div( + g, + g.op("Constant", value_t=torch.tensor([1], dtype=torch.float)), + add(g, p, eps), + ) + summation = symbolic_helper._reducesum_helper( + g, + pow(g, sub(g, input1, input2), p), + axes_i=[-1], + keepdims_i=symbolic_helper._parse_arg(keepdim, "i"), + ) + return pow(g, summation, inv_p) + + +@_onnx_symbolic("aten::clone") +# ignore clone operators that are inserted by PyTorch autograd +@_beartype.beartype +def clone(g: jit_utils.GraphContext, input, unused_memory_format): + return input + + +@_onnx_symbolic("aten::abs") +@_beartype.beartype +def abs(g: jit_utils.GraphContext, self): + return g.op("Abs", self) + + +@_onnx_symbolic("aten::log") +@_beartype.beartype +def log(g: jit_utils.GraphContext, self): + return g.op("Log", self) + + +@_onnx_symbolic("aten::log1p") +@_beartype.beartype +def log1p(g: jit_utils.GraphContext, self): + return log(g, add(g, symbolic_helper._if_scalar_type_as(torch.ones(1), self), self)) + + +@_onnx_symbolic("aten::log10") +@_beartype.beartype +def log10(g: jit_utils.GraphContext, self): + _ln10 = 2.30258509299404568401 + return g.op("Div", log(g, self), g.op("Constant", value_t=torch.tensor([_ln10]))) + + +@_onnx_symbolic("aten::pow") +@_beartype.beartype +def pow(g: jit_utils.GraphContext, self, exponent): + f_dtype = _type_utils.JitScalarType.from_value(self) + if not symbolic_helper._is_fp(self): + f_dtype = _type_utils.JitScalarType.FLOAT + self = g.op("Cast", self, to_i=f_dtype.onnx_type()) + if not symbolic_helper._is_fp(exponent): + exponent = g.op( + "Cast", + exponent, + to_i=f_dtype.onnx_type(), + ) + pow = g.op("Pow", self, exponent) + return pow + + +@_onnx_symbolic("aten::clamp") 
+@_beartype.beartype +def clamp(g: jit_utils.GraphContext, self, min, max): + # min or max may be None that we need to dispatch to + # Clip separately, as ONNX does not have None syntax + if symbolic_helper._is_none(min): + return clamp_max(g, self, max) + elif symbolic_helper._is_none(max): + return clamp_min(g, self, min) + else: + if symbolic_helper._is_constant(min) and symbolic_helper._is_constant(max): + return _op_with_optional_float_cast( + g, + "Clip", + self, + min_f=symbolic_helper._parse_arg(min, "f"), + max_f=symbolic_helper._parse_arg(max, "f"), + opset_before=12, + ) + else: + return clamp_max(g, clamp_min(g, self, min), max) + + +@_onnx_symbolic("aten::clamp_min") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def clamp_min(g: jit_utils.GraphContext, self, min): + if symbolic_helper._is_constant(min): + return _op_with_optional_float_cast( + g, "Clip", self, min_f=symbolic_helper._parse_arg(min, "f"), opset_before=12 + ) + else: + dtype = _type_utils.JitScalarType.from_value(self) + min = g.op("Cast", min, to_i=dtype.onnx_type()) + return _op_with_optional_float_cast(g, "Max", self, min, opset_before=12) + + +@_onnx_symbolic("aten::clamp_max") +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def clamp_max(g: jit_utils.GraphContext, self, max): + if symbolic_helper._is_constant(max): + return _op_with_optional_float_cast( + g, "Clip", self, max_f=symbolic_helper._parse_arg(max, "f"), opset_before=12 + ) + else: + dtype = _type_utils.JitScalarType.from_value(self) + max = g.op("Cast", max, to_i=dtype.onnx_type()) + return _op_with_optional_float_cast(g, "Min", self, max, opset_before=12) + + +@_onnx_symbolic("aten::max") +# torch.max (same for torch.min) actually has two interfaces smashed together: +# torch.max(x, dim, keepdim) and torch.max(x, y) +# TODO(justinchuby): Support multiple quantized args in output +@_beartype.beartype +def max(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None): + # torch.max(input) + if dim_or_y is None and keepdim is None: + return g.op("ReduceMax", self, keepdims_i=0) + # torch.max(input, other) + if keepdim is None: + return _op_with_optional_float_cast(g, "Max", self, dim_or_y, opset_before=12) + # torch.max(input, dim, keepdim) + else: + dim = symbolic_helper._get_const(dim_or_y, "i", "dim") + keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim") + max = g.op("ReduceMax", self, axes_i=[dim], keepdims_i=keepdim) + indices = g.op("ArgMax", self, axis_i=dim, keepdims_i=keepdim) + return max, indices + + +@_onnx_symbolic("aten::maximum") +@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def maximum(g: jit_utils.GraphContext, input, other): + return max(g, input, dim_or_y=other) + + +@_onnx_symbolic("aten::min") +# TODO(justinchuby): Support multiple quantized args in output +@_beartype.beartype +def min(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None): + # torch.min(input) + if dim_or_y is None and keepdim is None: + return g.op("ReduceMin", self, keepdims_i=0) + # torch.min(input, other) + if keepdim is None: + return _op_with_optional_float_cast(g, "Min", self, dim_or_y, opset_before=12) + # torch.min(input, dim, keepdim) + else: + dim = symbolic_helper._get_const(dim_or_y, "i", "dim") + keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim") + min = g.op("ReduceMin", self, axes_i=[dim], keepdims_i=keepdim) + indices = g.op("ArgMin", self, axis_i=dim, keepdims_i=keepdim) + return min, indices + + +@_onnx_symbolic("aten::minimum") 
+@symbolic_helper.quantized_args(True, True) +@_beartype.beartype +def minimum(g: jit_utils.GraphContext, input, other): + return min(g, input, dim_or_y=other) + + +@_onnx_symbolic("aten::amax") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "is", "i") +@_beartype.beartype +def amax(g: jit_utils.GraphContext, self, dim, keepdim): + return g.op("ReduceMax", self, axes_i=dim, keepdims_i=keepdim) + + +@_onnx_symbolic("aten::amin") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "is", "i") +@_beartype.beartype +def amin(g: jit_utils.GraphContext, self, dim, keepdim): + return g.op("ReduceMin", self, axes_i=dim, keepdims_i=keepdim) + + +@_onnx_symbolic("aten::aminmax") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def aminmax(g: jit_utils.GraphContext, self, dim, keepdim): + reduce_kwargs = {"keepdims_i": keepdim} + if not symbolic_helper._is_none(dim): + dim = symbolic_helper._get_const(dim, "i", "dim") + reduce_kwargs["axes_i"] = [dim] + + return g.op("ReduceMin", self, **reduce_kwargs), g.op( + "ReduceMax", self, **reduce_kwargs + ) + + +@_onnx_symbolic("aten::exp") +@_beartype.beartype +def exp(g: jit_utils.GraphContext, self): + return g.op("Exp", self) + + +@_onnx_symbolic("aten::dropout_") +@_onnx_symbolic("aten::dropout") +@symbolic_helper.parse_args("v", "f", "i") +@_beartype.beartype +def dropout(g: jit_utils.GraphContext, input, p, train): + symbolic_helper.check_training_mode(train, "dropout") + # if train is False, dropout is no-op + if not train: + return input + r, _ = g.op("Dropout", input, ratio_f=p, outputs=2) + return r + + +@_onnx_symbolic( + "aten::alpha_dropout_", decorate=[_apply_params("aten::alpha_dropout_")] +) # See Note [Export inplace] +@_onnx_symbolic( + "aten::feature_alpha_dropout_", + decorate=[_apply_params("aten::feature_alpha_dropout_")], +) +@_onnx_symbolic( + "aten::feature_dropout_", decorate=[_apply_params("aten::feature_dropout_")] +) +@_onnx_symbolic( + "aten::feature_alpha_dropout", + decorate=[_apply_params("aten::feature_alpha_dropout")], +) +@_onnx_symbolic("aten::alpha_dropout", decorate=[_apply_params("aten::alpha_dropout")]) +@_onnx_symbolic( + "aten::feature_dropout", decorate=[_apply_params("aten::feature_dropout")] +) +@_beartype.beartype +def _unsupported_dropout(name: str): + @symbolic_helper.parse_args("v", "none", "b") + @_beartype.beartype + def feature_dropout(g, input, p, train): + # NB: In inference mode, FeatureDropout is exported as an identity op. 
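+ # (This factory is registered once per dropout variant listed in the
+ # decorators above; the returned symbolic is an identity in eval mode and
+ # unimplemented in training mode.)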
+ if train: + return symbolic_helper._unimplemented(name, "training mode", input) + return input + + return feature_dropout + + +@_onnx_symbolic("aten::norm") +@symbolic_helper.parse_args("v", "t", "is", "i", "v") +@_beartype.beartype +def norm(g: jit_utils.GraphContext, self, p, dim, keepdim, dtype=None): + if p == 1: + f = _reduce_op_symbolic("ReduceL1") + elif p == 2: + f = _reduce_op_symbolic("ReduceL2") + else: + raise errors.SymbolicValueError( + "ONNX export only p-norms with p of 1 or 2", self + ) + result = f(g, self, dim=dim, keepdim=keepdim) + if dtype is not None: + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + result = g.op("Cast", result, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + return result + + +@_onnx_symbolic("aten::conv_tbc") +@symbolic_helper.parse_args("v", "v", "v", "i") +@_beartype.beartype +def conv_tbc(g: jit_utils.GraphContext, input, weight, bias, pad): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("conv_tbc", input, weight, bias, pad_i=pad) + else: + # input must have 3 dimensions, see: + # https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/ConvolutionTBC.cpp#L8-L10 + # input = (time, batch, in_channels) + # weight = (kernel_width, in_channels, out_channels) + # bias = (out_channels,) + input = g.op("Transpose", input, perm_i=[1, 2, 0]) + weight = g.op("Transpose", weight, perm_i=[2, 1, 0]) + conv = conv1d(g, input, weight, bias, [1], [pad], [1], 1) + return g.op("Transpose", conv, perm_i=[2, 0, 1]) + + +@_onnx_symbolic("aten::_unique") +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def _unique(g: jit_utils.GraphContext, input, sorted, return_inverse): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "_unique", + input, + sorted_i=sorted, + return_inverse_i=return_inverse, + outputs=2, + ) + else: + return symbolic_helper._onnx_unsupported("_unique", input) + + +@_onnx_symbolic("aten::_unique2") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def _unique2(g: jit_utils.GraphContext, input, sorted, return_inverse, return_counts): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "_unique2", + input, + sorted_i=sorted, + return_inverse_i=return_inverse, + return_counts_i=return_counts, + outputs=3, + ) + + symbolic_helper._onnx_opset_unsupported("_unique2", 9, 11, input) + + +@_onnx_symbolic("aten::_cast_Byte") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Byte(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.UINT8) + + +@_onnx_symbolic("aten::_cast_Char") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Char(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT8) + + +@_onnx_symbolic("aten::_cast_Short") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Short(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT16) + + +@_onnx_symbolic("aten::_cast_Int") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Int(g: jit_utils.GraphContext, input, non_blocking): + 
return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32) + + +@_onnx_symbolic("aten::_cast_Long") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Long(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT64) + + +@_onnx_symbolic("aten::_cast_Half") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Half(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.FLOAT16) + + +@_onnx_symbolic("aten::_cast_Float") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Float(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.FLOAT) + + +@_onnx_symbolic("aten::_cast_Double") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Double(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.DOUBLE) + + +@_onnx_symbolic("aten::_cast_Bool") +@_deprecation.deprecated( + "2.0", + "the future", + "Avoid using this function and create a Cast node instead", +) +@_beartype.beartype +def _cast_Bool(g: jit_utils.GraphContext, input, non_blocking): + return g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.BOOL) + + +@_onnx_symbolic("aten::empty") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def empty( + g: jit_utils.GraphContext, + sizes, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + return zeros(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::empty_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def empty_like( + g: jit_utils.GraphContext, + input, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + return zeros_like(g, input, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::new_empty") +@_beartype.beartype +def new_empty( + g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return empty(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::scalar_tensor") +@_beartype.beartype +def scalar_tensor(g: jit_utils.GraphContext, scalar, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + dtype = _type_utils.JitScalarType.FLOAT + scalar = g.op("Cast", scalar, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + return scalar + + +@_onnx_symbolic("aten::tensor") +@_beartype.beartype +def tensor( + g: jit_utils.GraphContext, data, dtype=None, device=None, requires_grad=False +): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if symbolic_helper._is_packed_list(data): + if dtype is None: + dtype = _type_utils.JitScalarType.from_value( + symbolic_helper._unpack_list(data)[0] + ) + input_list = list() + for t in symbolic_helper._unpack_list(data): + shape_reference = g.op("Constant", value_t=torch.LongTensor([1])) + t = symbolic_helper._reshape_helper(g, t, 
shape_reference) + t = g.op("Cast", t, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + input_list.append(t) + return g.op("Concat", *input_list, axis_i=0) + else: + if dtype is None: + dtype = _type_utils.JitScalarType.from_value(data) + if symbolic_helper._is_list(data) and ( + symbolic_helper._is_tensor_list(data) + or symbolic_helper._is_scalar_list(data) + ): + data = g.op("ConcatFromSequence", data, axis_i=0, new_axis_i=1) + return g.op("Cast", data, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + + +@_onnx_symbolic("aten::as_tensor") +@_beartype.beartype +def as_tensor(g: jit_utils.GraphContext, data, dtype=None, device=None): + return tensor(g, data, dtype, device) + + +@_onnx_symbolic("aten::zeros") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +@_beartype.beartype +def zeros(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + # NOTE: no way to set device, layout and pin_memory in ONNX, so we ignore it + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + sizes_ = symbolic_helper._maybe_get_const(sizes, "is") + if isinstance(sizes_, list) and len(sizes_) == 0: + sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64)) + return g.op( + "ConstantOfShape", + sizes, + value_t=torch.tensor([0], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::zeros_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def zeros_like( + g: jit_utils.GraphContext, + input, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + if symbolic_helper._is_none(dtype): + scalar_type = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + return g.op( + "ConstantOfShape", + shape, + value_t=torch.tensor([0], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::new_zeros") +@_beartype.beartype +def new_zeros( + g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return zeros(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::zero") +@_beartype.beartype +def zero(g: jit_utils.GraphContext, self): + self_dtype = symbolic_helper._try_get_scalar_type(self) + return zeros_like(g, self, self_dtype) + + +@_onnx_symbolic("aten::ones") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +@_beartype.beartype +def ones(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + sizes_ = symbolic_helper._maybe_get_const(sizes, "is") + if isinstance(sizes_, list) and len(sizes_) == 0: + sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64)) + return g.op( + "ConstantOfShape", + sizes, + value_t=torch.tensor([1], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::ones_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +@_beartype.beartype +def ones_like( + g: jit_utils.GraphContext, + input, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + if symbolic_helper._is_none(dtype): + scalar_type = _type_utils.JitScalarType.from_value( + input, 
_type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + return g.op( + "ConstantOfShape", + shape, + value_t=torch.tensor([1], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::new_ones") +@_beartype.beartype +def new_ones( + g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return ones(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::full") +@_beartype.beartype +def full( + g: jit_utils.GraphContext, sizes, value, dtype, layout, device, pin_memory=False +): + const_value = symbolic_helper._maybe_get_const(value, "t") + if symbolic_helper._is_value(const_value): + dtype = _type_utils.JitScalarType.FLOAT if dtype is None else dtype + tmp = zeros(g, sizes, dtype, layout, device) + return add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1))) + else: + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + sizes_ = symbolic_helper._maybe_get_const(sizes, "is") + if isinstance(sizes_, list) and len(sizes_) == 0: + sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64)) + return g.op( + "ConstantOfShape", + sizes, + value_t=const_value.view(1).to(scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::full_like") +@_beartype.beartype +def full_like( + g: jit_utils.GraphContext, + input, + fill_value, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + fill_value = symbolic_helper._maybe_get_const(fill_value, "f") + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + if symbolic_helper._is_value(fill_value): + tmp = zeros_like(g, input, dtype, layout, device) + fill_value = g.op("Cast", fill_value, to_i=scalar_type.onnx_type()) + return add(g, tmp, fill_value, g.op("Constant", value_t=torch.tensor(1))) + else: + shape = g.op("Shape", input) + return g.op( + "ConstantOfShape", + shape, + value_t=torch.tensor([fill_value], dtype=scalar_type.dtype()), + ) + + +@_onnx_symbolic("aten::new_full") +@_beartype.beartype +def new_full( + g: jit_utils.GraphContext, + self, + size, + fill_value, + dtype, + layout, + device, + pin_memory=False, +): + self_dtype = symbolic_helper._try_get_scalar_type(self) + if symbolic_helper._is_none(dtype) and self_dtype is not None: + dtype = self_dtype + return full(g, size, fill_value, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::eye") +@_beartype.beartype +def eye(g: jit_utils.GraphContext, *args): + if len(args) == 5: + # aten::eye(n, dtype, layout, device, pin_memory) + n, dtype, layout, device, pin_memory = args + dim_size = symbolic_helper._unsqueeze_helper(g, n, [0]) + shape = g.op("Concat", dim_size, dim_size, axis_i=0) + tensor = zeros(g, shape, dtype, layout, device) + return g.op("EyeLike", tensor) + if len(args) == 6: + # aten::eye(n, m, dtype, layout, device, pin_memory) + n, m, dtype, layout, device, pin_memory = args + shape = g.op( + "Concat", + symbolic_helper._unsqueeze_helper(g, n, [0]), + symbolic_helper._unsqueeze_helper(g, m, [0]), + axis_i=0, + ) + tensor = zeros(g, shape, dtype, layout, device) + return g.op("EyeLike", 
tensor) + + return symbolic_helper._unimplemented("aten::eye", f"with {len(args)} arguments") + + +@_onnx_symbolic("aten::slice") +@_beartype.beartype +def slice(g: jit_utils.GraphContext, self, *args): + if len(args) == 4: + # aten::slice(Tensor self, int dim, int start, int end, int step) -> Tensor + dim, start, end, step = args + step = symbolic_helper._parse_arg(step, "i") + if step != 1: + raise errors.SymbolicValueError("step!=1 is currently not supported", self) + is_start_none = start.node().kind() == "prim::Constant" and isinstance( + start.type(), _C.NoneType + ) + is_end_none = end.node().kind() == "prim::Constant" and isinstance( + end.type(), _C.NoneType + ) + is_start_onnx_const = start.node().kind() == "onnx::Constant" + is_end_onnx_const = end.node().kind() == "onnx::Constant" + if ( + ((not is_start_none) and (not is_start_onnx_const)) + or ((not is_end_none) and (not is_end_onnx_const)) + or dim.node().kind() != "onnx::Constant" + ): + if GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of Slice with dynamic inputs. DynamicSlice " + "is a deprecated experimental op. Please use statically allocated " + "variables or export to a higher opset version.", + self, + ) + else: + start_unsqueezed = symbolic_helper._unsqueeze_helper(g, start, [0]) + end_unsqueezed = symbolic_helper._unsqueeze_helper(g, end, [0]) + dim_unsqueezed = symbolic_helper._unsqueeze_helper(g, dim, [0]) + return g.op( + "DynamicSlice", + self, + start_unsqueezed, + end_unsqueezed, + dim_unsqueezed, + ) + else: + start = 0 if is_start_none else symbolic_helper._parse_arg(start, "i") + end = ( + _constants.INT64_MAX + if is_end_none + else symbolic_helper._parse_arg(end, "i") + ) + dim = symbolic_helper._parse_arg(dim, "i") + return symbolic_helper._slice_helper( + g, self, axes=[dim], starts=[start], ends=[end] + ) + elif len(args) == 3: + # aten::slice(t[] l, int start, int end, int step) -> t[] + start, end, step = args + dim = 0 + is_start_none = start.node().kind() == "prim::Constant" and isinstance( + start.type(), _C.NoneType + ) + is_end_none = end.node().kind() == "prim::Constant" and isinstance( + end.type(), _C.NoneType + ) + start = 0 if is_start_none else symbolic_helper._parse_arg(start, "i") + end = ( + _constants.INT64_MAX + if is_end_none + else symbolic_helper._parse_arg(end, "i") + ) + return symbolic_helper._slice_helper( + g, self, axes=[dim], starts=[start], ends=[end] + ) + + return symbolic_helper._unimplemented("aten::slice", f"with {len(args)} arguments") + + +@_onnx_symbolic("aten::hardtanh") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "f", "f") +@_beartype.beartype +def hardtanh(g: jit_utils.GraphContext, self: _C.Value, min_val: float, max_val: float): + return _op_with_optional_float_cast( + g, "Clip", self, min_f=min_val, max_f=max_val, opset_before=12 + ) + + +@_onnx_symbolic("aten::hardswish") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v") +@_beartype.beartype +def hardswish(g: jit_utils.GraphContext, self): + hs = hardsigmoid(g, self) + return g.op("Mul", self, hs) + + +@_onnx_symbolic("aten::hardsigmoid") +# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qhardsigmoid.cpp +@symbolic_helper.quantized_args(True, scale=1.0 / 256.0, zero_point=0) +@symbolic_helper.parse_args("v") +@_beartype.beartype +def hardsigmoid(g: jit_utils.GraphContext, self): + # Set alpha_f to 1 / 6 to make op equivalent to PyTorch's 
definition of Hardsigmoid. + # See https://pytorch.org/docs/stable/generated/torch.nn.Hardsigmoid.html + return g.op("HardSigmoid", self, alpha_f=1 / 6) + + +@_onnx_symbolic("aten::tanhshrink") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def tanhshrink(g: jit_utils.GraphContext, self): + return g.op("Sub", self, tanh(g, self)) + + +@_onnx_symbolic("aten::hardshrink") +@symbolic_helper.parse_args("v", "f") +@_beartype.beartype +def hardshrink(g: jit_utils.GraphContext, self, lambd): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + lambd_op = g.op( + "Constant", + value_t=torch.tensor(lambd, dtype=scalar_type.dtype()), + ) + cond = logical_or(g, gt(g, self, lambd_op), lt(g, self, neg(g, lambd_op))) + return g.op( + "Where", + cond, + self, + g.op( + "Constant", + value_t=torch.tensor(0, dtype=scalar_type.dtype()), + ), + ) + + +@_onnx_symbolic("aten::softshrink") +@symbolic_helper.parse_args("v", "f") +@_beartype.beartype +def softshrink(g: jit_utils.GraphContext, self, lambd): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + lambd_op = g.op( + "Constant", + value_t=torch.tensor(lambd, dtype=scalar_type.dtype()), + ) + gt_cond = gt(g, self, lambd_op) + gt_out = g.op( + "Where", + gt_cond, + sub(g, self, lambd_op), + g.op( + "Constant", + value_t=torch.tensor(0, dtype=scalar_type.dtype()), + ), + ) + lt_cond = lt(g, self, neg(g, lambd_op)) + lt_out = g.op( + "Where", + lt_cond, + add(g, self, lambd_op), + g.op( + "Constant", + value_t=torch.tensor(0, dtype=scalar_type.dtype()), + ), + ) + return add(g, gt_out, lt_out) + + +@_onnx_symbolic("aten::alias") +@_beartype.beartype +def alias(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("aten::unsqueeze") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def unsqueeze(g: jit_utils.GraphContext, self, dim): + # Handle negative dim + if dim < 0: + rank = symbolic_helper._get_tensor_rank(self) + if rank is not None: + warnings.warn( + "ONNX export unsqueeze with negative axis " + + str(dim) + + " might cause the onnx model to be incorrect. " + + "Negative axis is not supported in ONNX. " + + "Axis is converted to " + + str(dim + rank + 1) + + " based on input shape at export time. " + + "Passing an tensor of different rank in execution will be incorrect." + ) + dim = dim + rank + 1 + else: + return symbolic_helper._unimplemented( + "unsqueeze", "negative axis with unknown input rank", self + ) + + return symbolic_helper._unsqueeze_helper(g, self, axes_i=[dim]) + + +@_onnx_symbolic("aten::sort") +# TODO(justinchuby): Support multiple quantized args in output +@symbolic_helper.parse_args("v", "i", "i", "none") +@_beartype.beartype +def sort(g: jit_utils.GraphContext, self, dim, decending, out=None): + if out is not None: + symbolic_helper._unimplemented( + "Sort", "Out parameter is not supported for sort", self + ) + self_sizes = symbolic_helper._get_tensor_sizes(self) + try: + dim_size = self_sizes[dim] + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. 
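+ # As in unfold above: fall back to None when the static shape is unknown.
+ # Opset 9's TopK takes k as an attribute, so sort needs a concrete
+ # dim_size here and otherwise reports "input size not accessible".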
+ dim_size = None + + if dim_size is None: + return symbolic_helper._unimplemented("Sort", "input size not accessible", self) + + return g.op("TopK", self, k_i=dim_size, axis_i=dim, outputs=2) + + +@_onnx_symbolic("aten::numel") +@_beartype.beartype +def numel(g: jit_utils.GraphContext, self): + shape = g.op("Shape", self) + return g.op("ReduceProd", shape, keepdims_i=0) + + +@_onnx_symbolic("aten::topk") +# TODO(justinchuby): Support multiple quantized args in output +@symbolic_helper.parse_args("v", "i", "i", "i", "i", "none") +@_beartype.beartype +def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None): + if out is not None: + symbolic_helper._unimplemented( + "TopK", "Out parameter is not supported for topk", self + ) + if not largest: + symbolic_helper._unimplemented("TopK", "Ascending TopK is not supported", self) + + return g.op("TopK", self, k_i=k, axis_i=dim, outputs=2) + + +@_onnx_symbolic("prim::convert_element_type") +@_beartype.beartype +def convert_element_type(g: jit_utils.GraphContext, self, *args): + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + + +@_onnx_symbolic("aten::to") +@_beartype.beartype +def to(g: jit_utils.GraphContext, self, *args): + @_beartype.beartype + def is_aten_to_device_only(args): + if len(args) == 4: + # aten::to(Tensor, Device, bool, bool, memory_format) + return ( + args[0].node().kind() == "prim::device" + or args[0].type().isSubtypeOf(_C.ListType.ofInts()) + or isinstance(args[0].type(), _C.DeviceObjType) + ) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + # When dtype is None, this is a aten::to(device) call + dtype = symbolic_helper._get_const(args[1], "i", "dtype") + return dtype is None + elif len(args) in (6, 7): + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) -> Tensor + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) -> Tensor + # When dtype is None, this is a aten::to(device) call + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + return dtype is None + return False + + # ONNX doesn't have a concept of a device, so we ignore device-only casts + if is_aten_to_device_only(args): + return self + + if len(args) == 4: + # TestONNXRuntime::test_ones_bool shows args[0] of aten::to() can be onnx::Constant[value=]() + # In this case, the constant value is a tensor not int, + # so symbolic_helper._maybe_get_const(args[0], 'i') would not work. 
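+ # In that case the constant's payload is unpacked below: a 0-d tensor is
+ # reduced to its scalar-type index via .item(), anything else falls back to
+ # JitScalarType.from_value on args[0].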
+ dtype = args[0] + if ( + symbolic_helper._is_value(args[0]) + and args[0].node().kind() == "onnx::Constant" + ): + tval = symbolic_helper._node_get(args[0].node(), "value") + if isinstance(tval, torch.Tensor): + if len(tval.shape) == 0: + tval = tval.item() + dtype = int(tval) + else: + dtype = tval + + if symbolic_helper._is_value(dtype) or isinstance(dtype, torch.Tensor): + # aten::to(Tensor, Tensor, bool, bool, memory_format) + dtype = _type_utils.JitScalarType.from_value(args[0]) + return g.op( + "Cast", + self, + to_i=dtype.onnx_type(), + ) + else: + # aten::to(Tensor, ScalarType, bool, bool, memory_format) + # memory_format is ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + dtype = symbolic_helper._get_const(args[1], "i", "dtype") + # memory_format is ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + elif len(args) == 6: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) -> Tensor + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + elif len(args) == 7: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) -> Tensor + dtype = symbolic_helper._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()) + + return symbolic_helper._onnx_unsupported("Unknown aten::to signature", self) + + +@_onnx_symbolic("aten::repeat") +@_beartype.beartype +def repeat(g: jit_utils.GraphContext, self, repeats): + dtype = _type_utils.JitScalarType.INT64 + shape_ = ones_like(g, repeats, dtype) + self = g.op("Expand", self, shape_) + return g.op("Tile", self, repeats) + + +@_onnx_symbolic("aten::repeat_interleave") +@_beartype.beartype +def repeat_interleave( + g: jit_utils.GraphContext, self, repeats, dim=None, output_size=None +): + repeats_dim = symbolic_helper._get_tensor_rank(repeats) + repeats_sizes = symbolic_helper._get_tensor_sizes(repeats) + input_sizes = symbolic_helper._get_tensor_sizes(self) + if repeats_dim is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown repeats rank.", + self, + ) + if repeats_sizes is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown repeats size.", + self, + ) + if input_sizes is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of repeat_interleave for unknown input size.", + self, + ) + + # if dim is None flatten + # By default, use the flattened input array, and return a flat output array + if symbolic_helper._is_none(dim): + self = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1])) + ) + dim = torch.tensor(0, dtype=torch.int64) + else: + dim = symbolic_helper._maybe_get_scalar(dim) + + # Handle cases where dim is negative + if dim < 0: + dim += len(input_sizes) + + input_sizes_temp = input_sizes.copy() + for idx, input_size in enumerate(input_sizes): + if input_size is None: + input_sizes[idx], input_sizes_temp[idx] = 0, -1 + + # Cases where repeats is an int or single value tensor + if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1): + if input_sizes[dim] == 0: + return symbolic_helper._onnx_opset_unsupported_detailed( + 
"repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + self, + ) + return symbolic_helper._repeat_interleave_single_value_repeat_helper( + g, self, repeats, dim + ) + + # Cases where repeats is a 1 dim Tensor + elif repeats_dim == 1: + if input_sizes[dim] == 0: + return symbolic_helper._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + self, + ) + if repeats_sizes[0] is None: + return symbolic_helper._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported for cases with dynamic repeats", + self, + ) + assert ( + repeats_sizes[0] == input_sizes[dim] + ), "repeats must have the same size as input along dim" + reps = repeats_sizes[0] + else: + raise errors.SymbolicValueError("repeats must be 0-dim or 1-dim tensor", self) + + final_splits = list() + r_splits = symbolic_helper._repeat_interleave_split_helper(g, repeats, reps, 0) + i_splits = symbolic_helper._repeat_interleave_split_helper(g, self, reps, dim) + input_sizes[dim], input_sizes_temp[dim] = -1, 1 + for idx, r_split in enumerate(r_splits): + i_split = unsqueeze(g, i_splits[idx], dim + 1) + r_concat = [ + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[: dim + 1])), + r_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[dim + 1 :])), + ] + r_concat = g.op("Concat", *r_concat, axis_i=0) + i_split = expand(g, i_split, r_concat, None) + i_split = symbolic_helper._reshape_helper( + g, + i_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes)), + allowzero=0, + ) + final_splits.append(i_split) + return g.op("Concat", *final_splits, axis_i=dim) + + +@_onnx_symbolic("aten::pixel_shuffle") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def pixel_shuffle(g: jit_utils.GraphContext, self, upscale_factor): + dims = symbolic_helper._get_tensor_sizes(self) + if len(dims) != 4: + return symbolic_helper._unimplemented( + "pixel_shuffle", "only support 4d input", self + ) + if any(i is None for i in dims[1:]): + after_view = symbolic_helper._reshape_helper( + g, + symbolic_helper._unsqueeze_helper(g, self, [2, 3]), + g.op( + "Constant", + value_t=torch.tensor([0, -1, upscale_factor, upscale_factor, 0, 0]), + ), + allowzero=0, + ) + after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 4, 2, 5, 3]) + # For dynamic input shapes, two reshapes are performed + reshape_h = symbolic_helper._reshape_helper( + g, + after_transpose, + g.op("Constant", value_t=torch.tensor([0, 0, -1, 1, 0, 0])), + allowzero=0, + ) + reshape_w = symbolic_helper._reshape_helper( + g, + reshape_h, + g.op("Constant", value_t=torch.tensor([0, 0, 0, 0, -1, 1])), + allowzero=0, + ) + return symbolic_helper._squeeze_helper(g, reshape_w, [3, 5]) + else: + output_channel = dims[1] // upscale_factor // upscale_factor + after_view = symbolic_helper._reshape_helper( + g, + self, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + output_channel, + upscale_factor, + upscale_factor, + dims[2], + dims[3], + ] + ), + ), + allowzero=0, + ) + after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 4, 2, 5, 3]) + return symbolic_helper._reshape_helper( + g, + after_transpose, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + output_channel, + dims[2] * upscale_factor, + dims[3] * upscale_factor, + ] + ), + ), + allowzero=0, + ) + + +@_onnx_symbolic("aten::pixel_unshuffle") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def pixel_unshuffle(g: jit_utils.GraphContext, self, 
downscale_factor): + dims = symbolic_helper._get_tensor_sizes(self) + if len(dims) != 4: + return symbolic_helper._unimplemented( + "pixel_shuffle", "only support 4d input", self + ) + if any(i is None for i in dims[1:]): + # For dynamic input shapes, two reshapes are performed + reshape_h = symbolic_helper._reshape_helper( + g, + symbolic_helper._unsqueeze_helper(g, self, [3]), + g.op("Constant", value_t=torch.tensor([0, 0, -1, downscale_factor, 0])), + allowzero=0, + ) + reshape_w = symbolic_helper._reshape_helper( + g, + reshape_h, + g.op("Constant", value_t=torch.tensor([0, 0, 0, 0, -1, downscale_factor])), + allowzero=0, + ) + after_transpose = g.op("Transpose", reshape_w, perm_i=[0, 1, 3, 5, 2, 4]) + final_reshape = symbolic_helper._reshape_helper( + g, + after_transpose, + g.op("Constant", value_t=torch.tensor([0, -1, 1, 1, 0, 0])), + allowzero=0, + ) + return symbolic_helper._squeeze_helper(g, final_reshape, [2, 3]) + else: + output_channel = dims[1] * downscale_factor * downscale_factor + after_view = symbolic_helper._reshape_helper( + g, + self, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + dims[1], + dims[2] // downscale_factor, + downscale_factor, + dims[3] // downscale_factor, + downscale_factor, + ] + ), + ), + allowzero=0, + ) + after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 3, 5, 2, 4]) + return symbolic_helper._reshape_helper( + g, + after_transpose, + g.op( + "Constant", + value_t=torch.tensor( + [ + -1, + output_channel, + dims[2] // downscale_factor, + dims[3] // downscale_factor, + ] + ), + ), + allowzero=0, + ) + + +@_beartype.beartype +def _generic_rnn( + g: jit_utils.GraphContext, + variant, + input, + initial_states, + all_weights, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first=None, + batch_sizes=None, +): + warnings.warn( + "Exporting a model to ONNX with a batch_size other than 1, " + + "with a variable length with " + + variant + + " can cause an error " + + "when running the ONNX model with a different batch size. " + + "Make sure to save the model with a batch size of 1, " + + "or define the initial states (h0/c0) as inputs of the model. 
" + ) + + onnxActivations = [ + "Relu", + "Tanh", + "Sigmoid", + "Affine", + "LeakyRelu", + "ThresholdedRelu", + "ScaledTanh", + "HardSigmoid", + "Elu", + "Softsign", + "Softplus", + ] + variantToOnnxActivationMap = dict( + zip([act_fun.lower() for act_fun in onnxActivations], onnxActivations) + ) + weights_per_layer = 4 if has_biases else 2 + # this means that projections are used inside LSTM, so need to tell user that it's not supported + if variant == "LSTM" and len(all_weights) != num_layers * weights_per_layer * ( + 1 + bidirectional + ): + return symbolic_helper._unimplemented("LSTM", "LSTMs with projections", input) + assert len(all_weights) == num_layers * weights_per_layer * (1 + bidirectional) + layer_weights = [ + all_weights[i : i + weights_per_layer] + for i in range(0, len(all_weights), weights_per_layer) + ] + if batch_first: + # batch, seq, feat -> seq, batch, feat + input = g.op("Transpose", input, perm_i=[1, 0, 2]) + if dropout and train: + return symbolic_helper._unimplemented( + "RNN/GRU/LSTM", "dropout in training mode", input + ) + + if variant.startswith("RNN"): + nonlinearity = variantToOnnxActivationMap[variant[4:].lower()] + variant = "RNN" + + w_hh = all_weights[1] + hidden_size = symbolic_helper._get_tensor_dim_size(w_hh, 1) + if hidden_size is None: + return symbolic_helper._unimplemented( + "RNN/GRU/LSTM", "unknown hidden size", input + ) + + unidirectional = not bidirectional + + prev_output = input + + h_outs = [] + if variant == "RNN" or variant == "GRU": + h0 = initial_states + elif variant == "LSTM": + h0, c0 = initial_states + c_outs = [] + + sequence_lens = unused(g) if batch_sizes is None else batch_sizes + + if variant == "GRU": + # pytorch is reset, input, hidden + # onnx is input, reset, hidden + reform_permutation = [(1, 2), (0, 1), (2, 3)] + elif variant == "LSTM": + # pytorch is input, forget, cell, output. + # onnx is input, output, forget, cell. 
+ reform_permutation = [(0, 1), (3, 4), (1, 3)] + + @_beartype.beartype + def reform_weights(g, w, n, intervals): + slices = [ + symbolic_helper._slice_helper(g, w, axes=[0], starts=[x * n], ends=[y * n]) + for x, y in intervals + ] + return g.op("Concat", *slices, axis_i=0) + + @_beartype.beartype + def transform_weights_no_bias(layer_index): + weights = layer_weights[layer_index] + if variant == "RNN": + weight_ih, weight_hh = weights + elif variant == "GRU" or variant == "LSTM": + weight_ih, weight_hh = ( + reform_weights(g, w, hidden_size, reform_permutation) for w in weights + ) + return tuple( + symbolic_helper._unsqueeze_helper(g, x, [0]) for x in (weight_ih, weight_hh) # type: ignore[possibly-undefined] + ) + + @_beartype.beartype + def transform_weights(layer_index): + weights = layer_weights[layer_index] + if variant == "RNN": + weight_ih, weight_hh, bias_ih, bias_hh = weights + elif variant == "GRU" or variant == "LSTM": + weight_ih, weight_hh, bias_ih, bias_hh = ( + reform_weights(g, w, hidden_size, reform_permutation) for w in weights + ) + bias_concat = g.op("Concat", bias_ih, bias_hh, axis_i=0) # type: ignore[possibly-undefined] + return tuple( + symbolic_helper._unsqueeze_helper(g, x, [0]) + for x in (weight_ih, weight_hh, bias_concat) # type: ignore[possibly-undefined] + ) + + @_beartype.beartype + def retrieve_state(x, start, end): + return ( + x + if num_layers == 1 + else symbolic_helper._slice_helper( + g, x, axes=[0], starts=[start], ends=[end] + ) + ) + + for i in range(num_layers): + if unidirectional: + if weights_per_layer == 4: + weight_ih, weight_hh, bias_concat = transform_weights(i) + else: + weight_ih, weight_hh = transform_weights_no_bias(i) + bias_concat = unused(g) + + state_indices = i, i + 1 + else: + if weights_per_layer == 4: + weight_ih_f, weight_hh_f, bias_f = transform_weights(2 * i) + weight_ih_b, weight_hh_b, bias_b = transform_weights(2 * i + 1) + bias_concat = g.op("Concat", bias_f, bias_b, axis_i=0) + else: + weight_ih_f, weight_hh_f = transform_weights_no_bias(2 * i) + weight_ih_b, weight_hh_b = transform_weights_no_bias(2 * i + 1) + bias_concat = unused(g) + + weight_ih = g.op("Concat", weight_ih_f, weight_ih_b, axis_i=0) + weight_hh = g.op("Concat", weight_hh_f, weight_hh_b, axis_i=0) + + state_indices = 2 * i, 2 * i + 2 + + inputs = [prev_output, weight_ih, weight_hh, bias_concat, sequence_lens] + + inputs.append(retrieve_state(h0, *state_indices)) # type: ignore[possibly-undefined] + if variant == "LSTM": + inputs.append(retrieve_state(c0, *state_indices)) # type: ignore[possibly-undefined] + + extra_kwargs = {} if unidirectional else {"direction_s": "bidirectional"} + if variant == "RNN": + if bidirectional: + activation = [nonlinearity, nonlinearity] # type: ignore[possibly-undefined] + else: + activation = [nonlinearity] # type: ignore[possibly-undefined] + + prev_output, h_out = g.op( + "RNN", + *inputs, + outputs=2, + hidden_size_i=hidden_size, + activations_s=activation, + **extra_kwargs, + ) + elif variant == "GRU": + prev_output, h_out = g.op( + "GRU", + *inputs, + outputs=2, + hidden_size_i=hidden_size, + linear_before_reset_i=1, + **extra_kwargs, + ) + elif variant == "LSTM": + prev_output, h_out, c_out = g.op( + "LSTM", *inputs, outputs=3, hidden_size_i=hidden_size, **extra_kwargs + ) + + if bidirectional: + # The ONNX RNN/GRU/LSTM produce an output of dimensions + # seq_len, num_directions, batch, hidden_size + # We have to convert to match pytorch's expected + # seq_len, batch, num_directions * hidden_size + # by first moving 
num_directions before hidden_size with + # Transpose, and then combining it with hidden_size + # with Reshape. + prev_output = g.op("Transpose", prev_output, perm_i=[0, 2, 1, 3]) + prev_output = symbolic_helper._reshape_helper( + g, + prev_output, + g.op("Constant", value_t=torch.LongTensor([0, 0, -1])), + allowzero=0, + ) + else: + prev_output = symbolic_helper._squeeze_helper(g, prev_output, [1]) + + h_outs.append(h_out) # type: ignore[possibly-undefined] + if variant == "LSTM": + c_outs.append(c_out) # type: ignore[possibly-undefined] + if batch_first: + # seq, batch, num_directions * hidden_size -> batch, seq, num_directions * hidden_size + prev_output = g.op("Transpose", prev_output, perm_i=[1, 0, 2]) + h_outs = h_out if num_layers == 1 else g.op("Concat", *h_outs, axis_i=0) # type: ignore[possibly-undefined] + if variant == "RNN" or variant == "GRU": + return prev_output, h_outs + elif variant == "LSTM": + c_outs = c_out if num_layers == 1 else g.op("Concat", *c_outs, axis_i=0) # type: ignore[possibly-undefined] + return prev_output, h_outs, c_outs + + +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "f", "i", "i", "i") +@_beartype.beartype +def _lstm_full( + g: jit_utils.GraphContext, + input, + hidden_v, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, +): + hidden, weight = symbolic_helper._unpack_list( + hidden_v + ), symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + "LSTM", + input, + hidden, + weight, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + ) + + +@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i", "f", "i", "i") +@_beartype.beartype +def _lstm_packed( + g: jit_utils.GraphContext, + input, + batch_sizes, + hidden_v, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, +): + hidden, weight = symbolic_helper._unpack_list( + hidden_v + ), symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + "LSTM", + input, + hidden, + weight, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_sizes=batch_sizes, + ) + + +@_onnx_symbolic("aten::lstm") +@_beartype.beartype +def lstm(g: jit_utils.GraphContext, *args): + if symbolic_helper._is_tensor_list(args[3]): + return _lstm_packed(g, *args) + else: + return _lstm_full(g, *args) + + +@_onnx_symbolic("aten::lstm_cell") +@_beartype.beartype +def lstm_cell(g: jit_utils.GraphContext, self, hidden, w_ih, w_hh, b_ih, b_hh): + input = symbolic_helper._unsqueeze_helper(g, self, [0]) + hidden = symbolic_helper._unpack_list(hidden) + hidden = [symbolic_helper._unsqueeze_helper(g, x, [0]) for x in hidden] + weight = ( + (w_ih, w_hh, b_ih, b_hh) if symbolic_helper._is_tensor(b_ih) else (w_ih, w_hh) + ) + has_biases = True if symbolic_helper._is_tensor(b_ih) else False + _, h_outs, c_outs = _generic_rnn( + g, + "LSTM", + input, + hidden, + weight, + has_biases, + num_layers=1, + dropout=0, + train=0, + bidirectional=False, + batch_first=False, + ) + return symbolic_helper._squeeze_helper( + g, h_outs, [0] + ), symbolic_helper._squeeze_helper(g, c_outs, [0]) + + +@_onnx_symbolic("aten::gru", decorate=[_apply_params("GRU"), _export("gru")]) +@_onnx_symbolic( + "aten::rnn_tanh", decorate=[_apply_params("RNN_TANH"), _export("rnn_tanh")] +) +@_onnx_symbolic( + "aten::rnn_relu", decorate=[_apply_params("RNN_RELU"), _export("rnn_relu")] +) +def _one_hidden_rnn(kind: str): + @symbolic_helper.parse_args("v", "v", "v", "i", "i", "f", "i", "i", "i") + @_beartype.beartype + def _rnn_full( + g, + 
input, + hidden, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + ): + weight = symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + kind, + input, + hidden, + weight, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + ) + + @symbolic_helper.parse_args("v", "v", "v", "v", "i", "i", "f", "i", "i") + def _rnn_packed( + g, + input, + batch_sizes, + hidden, + weight_v, + has_biases, + num_layers, + dropout, + train, + bidirectional, + ): + weight = symbolic_helper._unpack_list(weight_v) + return _generic_rnn( + g, + kind, + input, + hidden, + weight, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_sizes=batch_sizes, + ) + + def symbolic(g, *args): + if symbolic_helper._is_tensor_list(args[3]): + return _rnn_packed(g, *args) + else: + return _rnn_full(g, *args) + + return symbolic + + +@_onnx_symbolic("aten::_dim_arange") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def _dim_arange(g: jit_utils.GraphContext, like, dim): + like_shape = g.op("Shape", like) + stop = g.op( + "Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0 + ) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.op("_caffe2::Range", stop) + else: + # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + return arange(g, stop, 4, None, None, None) + + +@_onnx_symbolic("aten::detach") +@_beartype.beartype +def detach(g: jit_utils.GraphContext, input): + # Erase aten::detach nodes because ONNX is inference only + return input + + +@_onnx_symbolic("aten::contiguous") +@symbolic_helper.parse_args("v", "i") +@_beartype.beartype +def contiguous(g: jit_utils.GraphContext, input, memory_format): + if memory_format > 2: # allower values are any, preserve and contiguous_format + raise errors.SymbolicValueError( + "onnx memory_format support is not implemented", input + ) + return input + + +@_onnx_symbolic("aten::_pack_padded_sequence") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def _pack_padded_sequence(g: jit_utils.GraphContext, input, lengths, batch_first): + # Currently there is no PackPadded operator in ONNX. We rely on an + # optimization pass to remove this later. It is an error if all + # PackPadded operators cannot be optimized out. + if batch_first: + input = g.op("Transpose", input, perm_i=[1, 0, 2]) + if not lengths.type().isSubtypeOf(torch._C.TensorType.get()): + raise errors.SymbolicValueError( + "'lengths' must be a Tensor for ONNX export", input + ) + # We know it's a TensorType so this check is now safe. + # It's really only necessary because those operators expand to something that + # only works with int32 types in Caffe2... 
+ if ( + _type_utils.JitScalarType.from_value( + lengths, _type_utils.JitScalarType.UNDEFINED + ) + != _type_utils.JitScalarType.INT + ): + lengths = g.op("Cast", lengths, to_i=_C_onnx.TensorProtoDataType.INT32) + return g.op("prim::PackPadded", input, lengths, outputs=2) + + +@_onnx_symbolic("aten::_pad_packed_sequence") +@symbolic_helper.parse_args("v", "v", "i", "t", "v") +@_beartype.beartype +def _pad_packed_sequence( + g: jit_utils.GraphContext, + data, + batch_sizes, + batch_first, + padding_value, + total_length, +): + # Ignore total_length as it is not supported in _symbolic_pad_packed_sequence + # It is only useful/used when training using data_parallel model, so + # It shouldn't be relevant for ONNX anyway + data, lengths = g.op("prim::PadPacked", data, batch_sizes, outputs=2) + if batch_first: + data = g.op("Transpose", data, perm_i=[1, 0, 2]) + return data, lengths + + +@_onnx_symbolic("aten::randint") +@_beartype.beartype +def randint(g: jit_utils.GraphContext, low, high, shapes, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + low_i = symbolic_helper._get_const(low, "i", "low") + high_i = symbolic_helper._get_const(high, "i", "high") + if dtype is None: + scalar_type = _type_utils.JitScalarType.INT64 + else: + scalar_type = _type_utils.JitScalarType(dtype) + if low_i is None: + raise symbolic_helper._onnx_unsupported("randint", low) + if high_i is None: + raise symbolic_helper._onnx_unsupported("randint", high) + + shape = symbolic_helper._maybe_get_const(shapes, "is") + if symbolic_helper._is_value(shape): + shape_const = g.op( + "ConstantOfShape", + shapes, + value_t=torch.tensor([0], dtype=torch.float), + ) + randn = g.op( + "RandomUniformLike", + shape_const, + low_f=low_i, + high_f=high_i, + ) + else: + randn = g.op( + "RandomUniform", + shape_i=shape, + low_f=low_i, + high_f=high_i, + ) + + # cast to integer type + int_dtype = _type_utils.JitScalarType.INT64 + randint = g.op("Cast", randn, to_i=int_dtype.onnx_type()) + if int_dtype != scalar_type: + randint = g.op("Cast", randint, to_i=scalar_type.onnx_type()) + return randint + + +@_onnx_symbolic("aten::randint_like") +@_beartype.beartype +def randint_like(g: jit_utils.GraphContext, self, low, high, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + low_i = symbolic_helper._get_const(low, "i", "low") + high_i = symbolic_helper._get_const(high, "i", "high") + if dtype is None: + scalar_type = _type_utils.JitScalarType.INT64 + else: + scalar_type = _type_utils.JitScalarType(dtype) + if low_i is None: + raise symbolic_helper._onnx_unsupported("randint", low) + if high_i is None: + raise symbolic_helper._onnx_unsupported("randint", high) + + randn = g.op( + "RandomUniformLike", + self, + low_f=low_i, + high_f=high_i, + ) + + # cast to integer type + int_dtype = _type_utils.JitScalarType.INT64 + randint = g.op("Cast", randn, to_i=int_dtype.onnx_type()) + if int_dtype != scalar_type: + randint = g.op("Cast", randint, to_i=scalar_type.onnx_type()) + return randint + + +@_onnx_symbolic("aten::randn") +@_beartype.beartype +def randn(g: jit_utils.GraphContext, shapes, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + shape = symbolic_helper._maybe_get_const(shapes, "is") + if symbolic_helper._is_value(shape): + shape_const = g.op( + "ConstantOfShape", + shapes, + value_t=torch.tensor([0], dtype=torch.float), + ) + return 
g.op( + "RandomNormalLike", + shape_const, + dtype_i=scalar_type.onnx_type(), + ) + return g.op( + "RandomNormal", + shape_i=shape, + dtype_i=scalar_type.onnx_type(), + ) + + +@_onnx_symbolic("aten::rand") +@_beartype.beartype +def rand(g: jit_utils.GraphContext, shapes, dtype, *options): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + shape = symbolic_helper._maybe_get_const(shapes, "is") + if symbolic_helper._is_value(shape): + shape_const = g.op( + "ConstantOfShape", + shapes, + value_t=torch.tensor([0], dtype=torch.float), + ) + return g.op( + "RandomUniformLike", + shape_const, + dtype_i=scalar_type.onnx_type(), + ) + return g.op( + "RandomUniform", + shape_i=shape, + dtype_i=scalar_type.onnx_type(), + ) + + +@_onnx_symbolic("aten::randn_like") +@_beartype.beartype +def randn_like( + g: jit_utils.GraphContext, + self, + dtype, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + else: + scalar_type = _type_utils.JitScalarType(dtype) + return g.op("RandomNormalLike", self, dtype_i=scalar_type.onnx_type()) + + +@_onnx_symbolic("aten::rand_like") +@_beartype.beartype +def rand_like( + g: jit_utils.GraphContext, + self, + dtype, + layout=None, + device=None, + pin_memory=False, + memory_format=None, +): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + if dtype is None: + dtype = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + return g.op( + "RandomUniformLike", self, dtype_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + + +@_onnx_symbolic("aten::rrelu") +@symbolic_helper.parse_args("v", "f", "f", "i", "none") +@_beartype.beartype +def rrelu(g: jit_utils.GraphContext, input, lower, upper, training, generator): + if not training: + slope = (upper + lower) / 2.0 + return g.op("LeakyRelu", input, alpha_f=slope) + p = g.op("RandomUniformLike", input, high_f=upper, low_f=lower) + return g.op("PRelu", input, p) + + +@_onnx_symbolic("aten::bernoulli") +@_beartype.beartype +def bernoulli(g: jit_utils.GraphContext, input, p=None, generator=None, out=None): + if out is not None and not symbolic_helper._is_none(out): + symbolic_helper._unimplemented( + "Bernoulli", "out parameter is not supported for bernoulli", input + ) + if generator is not None and not symbolic_helper._is_none(generator): + symbolic_helper._unimplemented( + "Bernoulli", "generator is not supported for bernoulli", input + ) + + dtype = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.UNDEFINED + ) + if dtype == _type_utils.JitScalarType.UNDEFINED: + return symbolic_helper._unimplemented( + "Bernoulli", "input dtype not accessible", input + ) + + rands = g.op( + "RandomUniformLike", + input, + high_f=1.0, + low_f=0.0, + dtype_i=dtype.onnx_type(), + ) + prob = p if p is not None and not symbolic_helper._is_none(p) else input + output = g.op("Less", rands, prob) + return g.op("Cast", output, to_i=dtype.onnx_type()) + + +@_onnx_symbolic("aten::log_sigmoid") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def log_sigmoid(g: jit_utils.GraphContext, input): + p = g.op("Sigmoid", input) + return g.op("Log", p) + + +@_onnx_symbolic("aten::erf") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def erf(g: 
jit_utils.GraphContext, input): + return g.op("Erf", input) + + +@_onnx_symbolic("aten::flatten") +@symbolic_helper.quantized_args(True, False, False) +@symbolic_helper.parse_args("v", "i", "i") +@_beartype.beartype +def flatten(g: jit_utils.GraphContext, input, start_dim, end_dim): + dim = symbolic_helper._get_tensor_rank(input) + if dim is None: + return symbolic_helper._unimplemented( + "dim", + "ONNX and PyTorch use different strategies to split the input. " + "Input rank must be known at export time.", + input, + ) + + if dim == 0: + return symbolic_helper._reshape_helper(g, input, [1]) + if dim == 1: + return g.op("Identity", input) + # TODO: remove this as onnx opset 11 spec allows negative axes + if end_dim < 0: + end_dim = dim + end_dim + # use ONNX's Flatten operator for cases where the output shape is 2D + if start_dim == 1 and end_dim == dim - 1: + return g.op("Flatten", input, axis_i=start_dim) + if start_dim == 0 and end_dim == dim - 2: + return g.op("Flatten", input, axis_i=end_dim + 1) + + return symbolic_helper._flatten_helper(g, input, start_dim, end_dim, dim) + + +@_onnx_symbolic("aten::nonzero") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def nonzero(g: jit_utils.GraphContext, input): + """Emitted from `torch.nonzero(x, as_tuple=False)`""" + return t(g, g.op("NonZero", input)) + + +@_onnx_symbolic("aten::nonzero_numpy") +# Emitted from `torch.nonzero(x, as_tuple=True)` +@_beartype.beartype +def nonzero_numpy(g: jit_utils.GraphContext, input, _outputs=None): + return unbind(g, nonzero(g, input), 1, _outputs=_outputs) + + +@_onnx_symbolic("aten::isnan") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def isnan(g: jit_utils.GraphContext, input): + output = g.op("IsNaN", input) + return output + + +@_onnx_symbolic("aten::any") +@_beartype.beartype +def _any(g: jit_utils.GraphContext, *args): + # aten::any(Tensor self) + if len(args) == 1: + input = args[0] + dim, keepdim = None, 0 + # aten::any(Tensor self, int[]? dim, bool keepdim) + else: + input, dim, keepdim = args + # Can be int list or single int + dim = symbolic_helper._parse_arg(dim, "t") + dim = [int(d) for d in dim.view(-1)] + keepdim = symbolic_helper._parse_arg(keepdim, "i") + input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT64) + input_sum = symbolic_helper._reducesum_helper( + g, input, axes_i=dim, keepdims_i=keepdim + ) + return gt(g, input_sum, g.op("Constant", value_t=torch.tensor(0, dtype=torch.long))) + + +@_onnx_symbolic("aten::all") +@_beartype.beartype +def _all(g: jit_utils.GraphContext, *args): + input = g.op("Not", args[0]) + # aten::all(Tensor self) + if len(args) == 1: + return g.op("Not", _any(g, input)) + # aten::all(Tensor self, int[]? 
dim, bool keepdim) + else: + return g.op("Not", _any(g, input, args[1], args[2])) + + +@_onnx_symbolic("aten::narrow") +@symbolic_helper.parse_args("v", "i", "i", "i") +@_beartype.beartype +def narrow(g: jit_utils.GraphContext, input, dim, start, length): + return symbolic_helper._slice_helper( + g, input, axes=[dim], starts=[start], ends=[start + length] + ) + + +@_onnx_symbolic("aten::argmax") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmax( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMax") + + +@_onnx_symbolic("aten::argmin") +@symbolic_helper.parse_args("v", "v", "b") +@_beartype.beartype +def argmin( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, +): + return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMin") + + +@_onnx_symbolic("aten::scatter") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def scatter(g: jit_utils.GraphContext, self, dim, index, src): + src_type = _type_utils.JitScalarType.from_value( + src, _type_utils.JitScalarType.UNDEFINED + ) + src = symbolic_helper._maybe_get_scalar(src) + if symbolic_helper._is_value(src): + return g.op("Scatter", self, index, src, axis_i=dim) + else: + # Check if scalar "src" has same type as self (PyTorch allows different + # type for scalar src (but not when src is tensor)). If not, insert Cast node. + self_scalar_type = _type_utils.JitScalarType.from_value(self) + if self_scalar_type != src_type: + src = g.op("Cast", src, to_i=self_scalar_type.onnx_type()) + return g.op("Scatter", self, index, expand_as(g, src, index), axis_i=dim) + + +@_onnx_symbolic("aten::scatter_add") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def scatter_add(g: jit_utils.GraphContext, self, dim, index, src): + scalar_type = symbolic_helper._try_get_scalar_type(self) + if scalar_type is None: + return symbolic_helper._unimplemented( + "scatter_add", "input dtype not accessible", self + ) + sizes = symbolic_helper._get_tensor_sizes(self, allow_nonstatic=False) + if sizes: + to_add = g.op("Constant", value_t=torch.zeros(sizes, dtype=scalar_type.dtype())) + else: + to_add = zeros_like(g, self, scalar_type) + to_add = symbolic_helper._scatter_helper(g, to_add, dim, index, src) + return add(g, self, to_add) + + +@_onnx_symbolic("aten::log2") +@_beartype.beartype +def log2(g: jit_utils.GraphContext, self): + _ln2 = 0.693147180559945309 + return g.op("Div", log(g, self), g.op("Constant", value_t=torch.tensor(_ln2))) + + +@_onnx_symbolic("aten::is_floating_point") +@_beartype.beartype +def is_floating_point(g: jit_utils.GraphContext, self): + if symbolic_helper._is_fp(self): + return g.op("Constant", value_t=torch.BoolTensor([1])) + return g.op("Constant", value_t=torch.BoolTensor([0])) + + +@_onnx_symbolic("aten::__is_") +@_beartype.beartype +def __is_(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_none(other): + if symbolic_helper._is_none(self): + return g.op("Constant", value_t=torch.BoolTensor([1])) + return g.op("Constant", value_t=torch.BoolTensor([0])) + return eq(g, self, other) + + +@_onnx_symbolic("aten::__isnot_") +@wrap_logical_op_with_negation +@_beartype.beartype +def __isnot_(g: jit_utils.GraphContext, self, other): + return __is_(g, self, other) + + +@_onnx_symbolic("aten::one_hot") +@_beartype.beartype +def one_hot(g: jit_utils.GraphContext, self, num_classes): + 
values = g.op("Constant", value_t=torch.LongTensor([0, 1])) + # onnxruntime supports limited type combinations for OneHot. + if _type_utils.JitScalarType.from_value( + num_classes, _type_utils.JitScalarType.UNDEFINED + ) in { + _type_utils.JitScalarType.UINT8, + _type_utils.JitScalarType.INT8, + _type_utils.JitScalarType.INT, + _type_utils.JitScalarType.INT16, + }: + num_classes = g.op("Cast", num_classes, to_i=_C_onnx.TensorProtoDataType.INT64) + return g.op("OneHot", self, num_classes, values, axis_i=-1) + + +@_onnx_symbolic("aten::gather") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def gather(g: jit_utils.GraphContext, self, dim, index, sparse_grad=False): + if symbolic_helper._maybe_get_const(sparse_grad, "i"): + return symbolic_helper._unimplemented("gather", "sparse_grad == True", self) + # NOTE: This workaround is needed since GatherElement is only supported + # since opset 11, and Gather in ONNX is not the same as torch.gather. + scalar_type = _type_utils.JitScalarType.from_value(self) + values = g.op("Constant", value_t=torch.LongTensor([0, 1])) + depth = size(g, self, g.op("Constant", value_t=torch.LongTensor([dim]))) + index = g.op( + "Cast", + g.op("OneHot", index, depth, values, axis_i=dim), + to_i=scalar_type.onnx_type(), + ) + mul = g.op("Mul", symbolic_helper._unsqueeze_helper(g, self, [dim + 1]), index) + return symbolic_helper._reducesum_helper(g, mul, axes_i=[dim], keepdims_i=0) + + +@symbolic_helper.parse_args("v", "is", "i", "i") +@_beartype.beartype +def _var_mean(g: jit_utils.GraphContext, input, dim, correction, keepdim): + if dim is None: + mean = g.op("ReduceMean", input, keepdims_i=0) + t_mean = mean + num_elements = numel(g, input) + else: + mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=keepdim) + t_mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=1) + redudced_dims = g.op("Shape", input) + # dim could contain one or multiple dimensions + redudced_dims = g.op( + "Gather", + redudced_dims, + g.op("Constant", value_t=torch.tensor(dim)), + axis_i=0, + ) + num_elements = g.op("ReduceProd", redudced_dims, keepdims_i=0) + sub_v = g.op("Sub", input, t_mean) + sqr_sub = g.op("Mul", sub_v, sub_v) + keepdim_mean = 0 if dim is None else keepdim + var = g.op("ReduceMean", sqr_sub, axes_i=dim, keepdims_i=keepdim_mean) + # Correct bias in calculating variance, by dividing it over (N - correction) instead on N + if correction is None: + correction = 1 + if correction != 0: + num_elements = g.op( + "Cast", num_elements, to_i=_C_onnx.TensorProtoDataType.FLOAT + ) + one = g.op("Constant", value_t=torch.tensor(correction, dtype=torch.float)) + mul = g.op("Mul", var, num_elements) + var = g.op("Div", mul, g.op("Sub", num_elements, one)) + return var, mean + + +@_onnx_symbolic("aten::std") +@_beartype.beartype +def std(g: jit_utils.GraphContext, input, *args): + var, _ = var_mean(g, input, *args) + return g.op("Sqrt", var) + + +@_onnx_symbolic("aten::var") +@_beartype.beartype +def var(g: jit_utils.GraphContext, input, *args): + var, _ = var_mean(g, input, *args) + return var + + +@_onnx_symbolic("aten::var_mean") +@_beartype.beartype +def var_mean(g: jit_utils.GraphContext, input, *args): + # var_mean (and all variance-related functions) has multiple signatures, so need to manually figure + # out the correct arguments: + # aten::var_mean(Tensor self, bool unbiased) + # aten::var_mean(Tensor self, int[1] dim, bool unbiased, bool keepdim=False) + # aten::var_mean(Tensor self, int[1]? dim=None, *, int? 
correction=None, bool keepdim=False) + if len(args) == 1: + return _var_mean(g, input, None, args[0], None) + else: + return _var_mean(g, input, *args) + + +@_onnx_symbolic("aten::std_mean") +@_beartype.beartype +def std_mean(g: jit_utils.GraphContext, input, *args): + var, mean = var_mean(g, input, *args) + return g.op("Sqrt", var), mean + + +@_onnx_symbolic("aten::logsumexp") +@symbolic_helper.parse_args("v", "is", "i") +@_beartype.beartype +def logsumexp(g: jit_utils.GraphContext, input, dim, keepdim): + return g.op("ReduceLogSumExp", input, axes_i=dim, keepdims_i=keepdim) + + +@_onnx_symbolic("aten::arange") +@_beartype.beartype +def arange(g: jit_utils.GraphContext, *args): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("arange", *args) + + @_beartype.beartype + def _get_arange_dtype(dtype): + dtype = symbolic_helper._maybe_get_const(dtype, "i") + return dtype + + @_beartype.beartype + def _float_step_convert(range_tensor): + if symbolic_helper._is_fp(range_tensor): + range_tensor = g.op( + "Cast", + g.op("Ceil", range_tensor), + to_i=_type_utils.JitScalarType.INT64.onnx_type(), + ) + return range_tensor + + if len(args) == 2 or len(args) == 5: + if len(args) == 2: + # aten::arange(Scalar end, Tensor out) + dtype = None + else: + # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[1]) + dtype, end, start, step = symbolic_helper._arange_cast_helper( + g, end=args[0], dtype=dtype + ) + end = symbolic_helper._unsqueeze_helper(g, end, [0]) + range_tensor = _float_step_convert(end) + arange_tensor = symbolic_helper._squeeze_helper( + g, nonzero(g, ones(g, range_tensor, dtype, None, None)), [1] + ) + return g.op( + "Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + elif len(args) == 4 or len(args) == 7: + if len(args) == 4: + # aten::arange(Scalar start, Scalar end, Scalar step, Tensor out) + dtype = None + else: + # aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[3]) + dtype, end, start, step = symbolic_helper._arange_cast_helper( + g, start=args[0], end=args[1], step=args[2], dtype=dtype + ) + step = symbolic_helper._unsqueeze_helper(g, step, [0]) + end = symbolic_helper._unsqueeze_helper(g, end, [0]) + start = symbolic_helper._unsqueeze_helper(g, start, [0]) + range_tensor = _float_step_convert(g.op("Div", g.op("Sub", end, start), step)) + arange_tensor = symbolic_helper._squeeze_helper( + g, nonzero(g, ones(g, range_tensor, None, None, None)), [1] + ) + arange_tensor = g.op("Add", g.op("Mul", arange_tensor, step), start) + return g.op( + "Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + elif len(args) == 6: + # aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + dtype = _get_arange_dtype(args[2]) + dtype, end, start, step = symbolic_helper._arange_cast_helper( + g, start=args[0], end=args[1], dtype=dtype + ) + end = symbolic_helper._unsqueeze_helper(g, end, [0]) + start = symbolic_helper._unsqueeze_helper(g, start, [0]) + range_tensor = _float_step_convert(g.op("Sub", end, start)) + arange_tensor = g.op( + "Add", + symbolic_helper._squeeze_helper( + g, nonzero(g, ones(g, range_tensor, dtype, *(args[3:]))), [1] + ), + start, + ) + return g.op( + "Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + + return symbolic_helper._unimplemented("aten::arange", f"with {len(args)} arguments") + + 
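+# linspace below is emitted as start + arange(steps) * (end - start) / (steps - 1)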
+@_onnx_symbolic("aten::linspace") +@_beartype.beartype +def linspace( + g: jit_utils.GraphContext, start, end, steps, dtype, layout, device, pin_memory +): + range_tensor = symbolic_helper._arange_helper(g, steps, None) + step = div( + g, + sub(g, end, start), + sub(g, steps, g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))), + ) + return add(g, mul(g, range_tensor, step), start) + + +@_onnx_symbolic("aten::lift") +@_beartype.beartype +def lift(g: jit_utils.GraphContext, self): + # at::lift() is a no-op from the perspective of tracing for onnx + return self + + +@_onnx_symbolic("aten::masked_fill") +@_beartype.beartype +def masked_fill(g: jit_utils.GraphContext, self, mask, value): + mask = g.op("Cast", mask, to_i=_C_onnx.TensorProtoDataType.BOOL) + value = symbolic_helper._maybe_get_scalar(value) + return g.op("Where", mask, symbolic_helper._if_scalar_type_as(value, self), self) + + +@_onnx_symbolic("aten::masked_fill_") +@_beartype.beartype +def masked_fill_(g: jit_utils.GraphContext, self, mask, value): + return masked_fill(g, self, mask, value) + + +@_onnx_symbolic("aten::index") +@_beartype.beartype +def index(g: jit_utils.GraphContext, self, index): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("index", self, index, overload_name="Tensor") + + if symbolic_helper._is_packed_list(index): + indices = symbolic_helper._unpack_list(index) + else: + indices = [index] + + @_beartype.beartype + def try_mask_to_index(index): + if not symbolic_helper._is_none(index) and ( + _type_utils.JitScalarType.from_value( + index, _type_utils.JitScalarType.UNDEFINED + ) + == _type_utils.JitScalarType.UINT8 + or symbolic_helper._is_bool(index) + ): + if g.opset < 9: + raise errors.SymbolicValueError( + "Exporting masked indices are only supported after ONNX opset 9.", + self, + ) + warnings.warn( + "Exporting aten::index operator with indices of type Byte. " + "Only 1-D indices are supported. In any other case, " + "this will produce an incorrect ONNX graph." + ) + index = symbolic_helper._squeeze_helper(g, nonzero(g, index), [1]) + return index + + indices = [try_mask_to_index(idx) for idx in indices] + if len(indices) == 1: + return symbolic_helper._select_helper( + g, self, 0, indices[0], apply_reshape=False + ) + else: + # Multiple tensors as indices. Each tensor could either be + # 1. prim::Constant() + # representing ":" in python indexing. E.g. tensor[:, :] + # 2. prim::Constant[value=...] or tensor output + # representing advanced indexing. E.g. tensor[[0, 1], [2, 0]]. + # For more info on advanced indexing, + # check https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing + + # Consider a general case of + # t: [x_1, y_1, y_2, ..., x_m, ..., y_n] + # where t is a tensor of rank m+n, {x_i} are axes where tensor index is provided, and {y_i} are axes for ":". + # Same results can be achieved through transposing t into + # t: [x_1, x_2, ..., x_m, y_1, y_2, ..., y_n] + # and use gatherND. However ONNX does not have gatherND, to use 1d gather we'll need to flatten t + # and process the tensor indices. + # t: [x_1 * x_2 * ... * x_m, y_1 * y_2 * ... * y_n] + # tensor index = \sum_{i=1}^m (ind_i * \prod_{j=i+1}^m (x_j)) + # After gather, reshape and transpose back. 
+ adv_idx_indices = [ + i for i, idx in enumerate(indices) if not symbolic_helper._is_none(idx) + ] + + if len(adv_idx_indices) == 0: + return self + elif len(adv_idx_indices) == 1: + return index_select( + g, self, adv_idx_indices[0], indices[adv_idx_indices[0]] + ) + else: + rank = symbolic_helper._get_tensor_rank(self) + if rank is None: + return symbolic_helper._unimplemented( + "aten::index", + "operator of advanced indexing on tensor of unknown rank. " + "Try turning on shape inference during export: " + "torch.onnx._export(..., onnx_shape_inference=True).", + self, + ) + # TODO: If indexing is supported natively in ONNX in future opsets, + # update the warning to recommend exporting with higher opset version. + warnings.warn( + "Exporting aten::index operator of advanced indexing in opset " + f"{GLOBALS.export_onnx_opset_version}" + " is achieved by combination of multiple ONNX operators, " + "including Reshape, Transpose, Concat, and Gather. " + "If indices include negative values, the exported graph will produce incorrect results." + ) + adv_idx_count = len(adv_idx_indices) + shape_tensor = _shape_as_tensor(g, self) + dim_tensor_list = [ + g.op( + "Gather", + shape_tensor, + g.op("Constant", value_t=torch.LongTensor([dim])), + axis_i=0, + ) + for dim in range(rank) + ] + + self = g.op( + "Transpose", + self, + perm_i=adv_idx_indices + + [i for i in range(rank) if i not in adv_idx_indices], + ) + self = g.op("Flatten", self, axis_i=adv_idx_count) + + # Note that tensor indices will be broadcasted while accumulating. Thus we get the final subarray shape as well. + cum_adv_index = indices[adv_idx_indices[-1]] + multiplier = dim_tensor_list[adv_idx_indices[-1]] + for i in range(adv_idx_count - 2, -1, -1): + adv_index = g.op("Mul", indices[adv_idx_indices[i]], multiplier) + cum_adv_index = g.op("Add", cum_adv_index, adv_index) + multiplier = g.op( + "Mul", multiplier, dim_tensor_list[adv_idx_indices[i]] + ) + + # perform gather + self = index_select(g, self, 0, cum_adv_index) + + cum_adv_index_shape_tensor = _shape_as_tensor(g, cum_adv_index) + # check if all advanced indices are consecutive. + # Refer to https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing + # to understand how the subarray position is decided. + if adv_idx_indices == list( + range(adv_idx_indices[0], adv_idx_indices[-1] + 1) + ): + # unfold regular index axes + folded_adv_idx_shape_list = [ + g.op("Constant", value_t=torch.LongTensor([-1])) + ] + [ + dim_tensor_list[i] for i in range(rank) if i not in adv_idx_indices + ] + folded_adv_idx_shape = g.op( + "Concat", *folded_adv_idx_shape_list, axis_i=0 + ) + self = symbolic_helper._reshape_helper(g, self, folded_adv_idx_shape) + + # Transpose folded advanced indexed axis to its original location. 
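+                # the permute moves axis 0 (the folded advanced-index axis) back to
+                # position adv_idx_indices[0], keeping the remaining regular axes in order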
+ adv_idx_permute = ( + list(range(1, adv_idx_indices[0] + 1)) + + [0] + + list(range(adv_idx_indices[0] + 1, rank - adv_idx_count + 1)) + ) + self = g.op("Transpose", self, perm_i=adv_idx_permute) + + # unfold advanced index axes + final_shape_list = ( + [dim_tensor_list[i] for i in range(adv_idx_indices[0])] + + [cum_adv_index_shape_tensor] + + [ + dim_tensor_list[i] + for i in range(adv_idx_indices[0], rank) + if i not in adv_idx_indices + ] + ) + final_shape = g.op("Concat", *final_shape_list, axis_i=0) + else: + final_shape = g.op( + "Concat", + cum_adv_index_shape_tensor, + *[ + dim_tensor_list[i] + for i in range(rank) + if i not in adv_idx_indices + ], + axis_i=0, + ) + + return symbolic_helper._reshape_helper(g, self, final_shape) + + +@_onnx_symbolic("aten::linalg_norm") +@symbolic_helper.parse_args("v", "v", "is", "b", "v") +@_beartype.beartype +def linalg_norm( + g: jit_utils.GraphContext, + self: torch._C.Value, + ord: torch._C.Value, + dim: Optional[Sequence[int]], + keepdim: bool, + dtype: torch._C.Value, +): + # Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.norm.html + ord_value = None + if dim is None: + if symbolic_helper._is_none(ord): + self = symbolic_helper._reshape_helper(g, self, [-1]) + ord = g.op("Constant", value_t=torch.LongTensor([2])) + self_dim = symbolic_helper._get_tensor_rank(self) + if self_dim is None: + return symbolic_helper._unimplemented( + "dim", "Input rank must be known at export time.", self + ) + if self_dim == 1: + ord_value = symbolic_helper._parse_arg(ord, "f") + else: + dim = [0, 1] + else: + if len(dim) == 1: + if symbolic_helper._is_none(ord): + ord = g.op("Constant", value_t=torch.LongTensor([2])) + ord_value = symbolic_helper._parse_arg(ord, "f") + if ord_value: + return linalg_vector_norm(g, self, ord_value, dim, keepdim, dtype) + return linalg_matrix_norm(g, self, ord, dim, keepdim, dtype) + + +@_onnx_symbolic("aten::linalg_vector_norm") +@symbolic_helper.parse_args("v", "f", "is", "b", "v") +@_beartype.beartype +def linalg_vector_norm( + g: jit_utils.GraphContext, + self: torch._C.Value, + ord: float, + dim: Optional[Sequence[int]], + keepdim: bool, + dtype: torch._C.Value, +): + # Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.vector_norm.html + if symbolic_helper._is_none(dim): + self = symbolic_helper._reshape_helper(g, self, [-1]) + keepdim = False + + if ord == math.inf: + result = g.op("ReduceMax", g.op("Abs", self), axes_i=dim, keepdims_i=keepdim) + elif ord == -math.inf: + result = g.op("ReduceMin", g.op("Abs", self), axes_i=dim, keepdims_i=keepdim) + elif ord == 0: + return symbolic_helper._onnx_opset_unsupported_detailed( + "linalg_vector_norm", 9, 11, "ord=0 not supported", self + ) + elif ord == 1: + result = _reduce_op_symbolic("ReduceL1")(g, self, dim=dim, keepdim=keepdim) + elif ord == 2: + result = _reduce_op_symbolic("ReduceL2")(g, self, dim=dim, keepdim=keepdim) + else: + ord_op = g.op("Constant", value_t=torch.tensor(ord, dtype=torch.float32)) + result = symbolic_helper._reducesum_helper( + g, g.op("Pow", g.op("Abs", self), ord_op), axes_i=dim, keepdims_i=keepdim + ) + result = g.op( + "Pow", + result, + g.op( + "Div", + g.op("Constant", value_t=torch.tensor(1, dtype=torch.float32)), + ord_op, + ), + ) + + if not symbolic_helper._is_none(dtype): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + result = g.op("Cast", result, to_i=_type_utils.JitScalarType(dtype).onnx_type()) # type: ignore[arg-type] + return result + + 
+@_onnx_symbolic("aten::linalg_matrix_norm") +@symbolic_helper.parse_args("v", "v", "is", "b", "v") +@_beartype.beartype +def linalg_matrix_norm( + g: jit_utils.GraphContext, + self: torch._C.Value, + ord: torch._C.Value, + dim: List[int], + keepdim: bool, + dtype: torch._C.Value, +): + # Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.matrix_norm.html + ord_value = symbolic_helper._parse_arg(ord, "s") + if ord_value == "fro": + return frobenius_norm(g, self, dim, keepdim) + elif ord_value == "nuc": + return symbolic_helper._unimplemented("linalg.matrix_norm", "ord==nuc", self) + else: + ord_value = symbolic_helper._parse_arg(ord, "f") + if ord_value is None: + return frobenius_norm(g, self, dim, keepdim) + if ord_value == 2 or ord_value == -2: + # ord = 2/-2 unimplemented due to lack of operators + # used to calculate singular values + return symbolic_helper._unimplemented("linalg.matrix_norm", "ord==2", self) + # Wrap the dim vector to handle negative dim values + self_dim = symbolic_helper._get_tensor_rank(self) + if self_dim is None: + return symbolic_helper._unimplemented( + "linalg.matrix_norm", "Input rank must be known at export time.", self + ) + # Common implementation for cases with + # ord = 1/-1 and ord = inf/-inf + if dim[0] < 0: + dim[0] += self_dim + if dim[1] < 0: + dim[1] += self_dim + + if ord_value == math.inf or ord_value == -math.inf: + dim[0], dim[1] = dim[1], dim[0] + if dim[1] > dim[0] and not keepdim: + dim[1] -= 1 + sum = symbolic_helper._reducesum_helper( + g, g.op("Abs", self), axes_i=[dim[0]], keepdims_i=keepdim + ) + if ord_value > 0: + result, indices = max( + g, + sum, + dim_or_y=g.op("Constant", value_t=torch.LongTensor([dim[1]])), + keepdim=keepdim, + ) + else: + result, indices = min( + g, + sum, + dim_or_y=g.op("Constant", value_t=torch.LongTensor([dim[1]])), + keepdim=keepdim, + ) + return result + + +@_onnx_symbolic("aten::linalg_cross") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def linalg_cross(g: jit_utils.GraphContext, input, other, dim=-1): + return cross(g, input, other, dim) + + +@_onnx_symbolic("aten::frobenius_norm") +@symbolic_helper.parse_args("v", "is", "b") +@_beartype.beartype +def frobenius_norm(g: jit_utils.GraphContext, self, dim=None, keepdim=False): + sqr = g.op("Mul", self, self) + sumsqr = symbolic_helper._reducesum_helper(g, sqr, axes_i=dim, keepdims_i=keepdim) + return g.op("Sqrt", sumsqr) + + +@_onnx_symbolic("aten::multinomial") +@symbolic_helper.parse_args("v", "i", "b", "v") +@_beartype.beartype +def multinomial( + g: jit_utils.GraphContext, input, num_samples, replacement=False, generator=None +): + if generator is not None and not symbolic_helper._is_none(generator): + symbolic_helper._unimplemented( + "Multinomial", "generator is not supported for multinomial", input + ) + if not replacement and num_samples > 1: + symbolic_helper._unimplemented( + "Multinomial", + "replacement=False when num_samples > 1 is not supported for multinomial", + input, + ) + + log_input = log(g, input) + return g.op( + "Multinomial", + log_input, + dtype_i=_C_onnx.TensorProtoDataType.INT64, + sample_size_i=num_samples, + ) + + +@_onnx_symbolic("aten::baddbmm") +@_beartype.beartype +def baddbmm(g: jit_utils.GraphContext, self, batch1, batch2, beta, alpha): + scalar_type = _type_utils.JitScalarType.from_value(self) + batch_mul = matmul(g, batch1, batch2) + mul_a = mul( + g, + batch_mul, + g.op("Cast", alpha, to_i=scalar_type.onnx_type()), + ) + mul_b = mul( + g, + self, + g.op("Cast", beta, 
to_i=scalar_type.onnx_type()), + ) + return add(g, mul_a, mul_b) + + +@_onnx_symbolic("aten::meshgrid") +@symbolic_helper.parse_args("v", "s") +@_beartype.beartype +def meshgrid(g: jit_utils.GraphContext, tensor_list, indexing: Optional[str] = None): + if indexing is None: + indexing = "ij" + elif indexing not in {"ij", "xy"}: + raise errors.SymbolicValueError( + f"Unsupported indexing: {indexing}", tensor_list + ) + unpacked_tensor_list = symbolic_helper._unpack_list(tensor_list) + if indexing == "xy": + unpacked_tensor_list[:2] = unpacked_tensor_list[1::-1] + tensors = [ + symbolic_helper._reshape_helper( + g, t, g.op("Constant", value_t=torch.LongTensor([-1])) + ) + for t in unpacked_tensor_list + ] + tensors_shape = [g.op("Shape", t) for t in tensors] + out_shape = g.op("Concat", *tensors_shape, axis_i=0) + out = [] + for i, t in enumerate(tensors): + shape_i = [g.op("Constant", value_t=torch.ones(1, dtype=torch.int64))] * len( + tensors + ) + shape_i[i] = tensors_shape[i] + t_reshaped = _reshape_from_tensor(g, t, g.op("Concat", *shape_i, axis_i=0)) + out.append(g.op("Expand", t_reshaped, out_shape)) + if indexing == "xy": + out[0], out[1] = out[1], out[0] + return g.op("prim::ListConstruct", *out) + + +@_onnx_symbolic("aten::remainder") +@_beartype.beartype +def remainder(g: jit_utils.GraphContext, input, other): + div = _floor_divide(g, input, other) + quo = g.op("Mul", div, other) + return g.op("Sub", input, quo) + + +@_onnx_symbolic("aten::gelu") +@symbolic_helper.parse_args("v", "s") +@_beartype.beartype +def gelu(g: jit_utils.GraphContext, self: torch._C.Value, approximate: str = "none"): + if approximate == "tanh": + kBeta = math.sqrt(2 / math.pi) + kKappa = 0.044715 + + beta = torch.tensor(kBeta, dtype=torch.double) + kappa = torch.tensor(kKappa, dtype=torch.double) + one = torch.tensor(1.0, dtype=torch.double) + half = torch.tensor(0.5, dtype=torch.double) + + self_cube = mul(g, self, mul(g, self, self)) + inner = mul(g, beta, add(g, self, mul(g, kappa, self_cube))) + return mul(g, half, mul(g, self, add(g, one, g.op("Tanh", inner)))) + else: + _sqrt2 = 1.4142135623730951 + erf = g.op("Erf", g.op("Div", self, torch.tensor(_sqrt2, dtype=torch.double))) + erf_plusone = add( + g, erf, g.op("Constant", value_t=torch.tensor(1, dtype=torch.double)) + ) + return mul( + g, + mul(g, self, erf_plusone), + g.op("Constant", value_t=torch.tensor(0.5, dtype=torch.double)), + ) + + +@_onnx_symbolic("aten::group_norm") +@symbolic_helper.quantized_args(True, False, False, False) +@symbolic_helper.parse_args("v", "i", "v", "v", "f", "i") +@_beartype.beartype +def group_norm( + g: jit_utils.GraphContext, input, num_groups, weight, bias, eps, cudnn_enabled +): + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at( + "group_norm", + input, + weight, + bias, + num_groups_i=num_groups, + eps_f=eps, + cudnn_enabled_i=cudnn_enabled, + ) + + channel_size = symbolic_helper._get_tensor_dim_size(input, 1) + if channel_size is not None: + assert channel_size % num_groups == 0 + input_rank = symbolic_helper._get_tensor_rank(input) + if input_rank is None: + return symbolic_helper._unimplemented("group_norm", "unknown input rank", input) + # 0 in the shape list keeps dimension value unchanged. + shape = [0, num_groups, -1] + input_reshaped = symbolic_helper._reshape_helper( + g, input, g.op("Constant", value_t=torch.LongTensor(shape)) + ) + + # C is always divisible by num_groups + # Due to shape difference. 
we need to apply weight and bias after + # instance norm computation and reshape + weight_ = g.op( + "Constant", + value_t=torch.tensor( + [1.0] * num_groups, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ), + ) + bias_ = g.op( + "Constant", + value_t=torch.tensor( + [0.0] * num_groups, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ), + ) + + norm_reshaped = g.op( + "InstanceNormalization", input_reshaped, weight_, bias_, epsilon_f=eps + ) + norm = symbolic_helper._reshape_helper(g, norm_reshaped, g.op("Shape", input)) + + if weight is None or weight.node().mustBeNone(): + weight_value = torch.tensor( + [1.0], dtype=_type_utils.JitScalarType.from_value(input).dtype() + ) + weight = g.op("Constant", value_t=weight_value) + if bias is None or bias.node().mustBeNone(): + bias_value = torch.tensor( + [0.0], dtype=_type_utils.JitScalarType.from_value(input).dtype() + ) + bias = g.op("Constant", value_t=bias_value) + + # Norm has shape [N, C, *] so we reshape weight and bias to [C, *] + axes = list(range(1, input_rank - 1)) + return add( + g, + mul(g, norm, symbolic_helper._unsqueeze_helper(g, weight, axes)), + symbolic_helper._unsqueeze_helper(g, bias, axes), + ) + + +@_onnx_symbolic("aten::_weight_norm") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def _weight_norm(g: jit_utils.GraphContext, weight_v, weight_g, dim): + rank = symbolic_helper._get_tensor_rank(weight_v) + if rank is not None: + # W = g * ((v) / ||v||) + # Compute norm_except_dim for l2 norm. dim = None means over all dims + # torch's weight_norm module sets dim = -1 if it's None. + # This conflicts the logic for negative axes to access dims backwards + # TODO: Might need a fix in torch group_norm module + axes = list(range(rank)) + if dim is not None: + if dim < -1: + dim += rank + if dim != -1: + axes.remove(dim) + norm_v = norm(g, weight_v, 2, axes, 1) + div = g.op("Div", weight_v, norm_v) + return g.op("Mul", div, weight_g) + if symbolic_helper.is_caffe2_aten_fallback(): + return g.at("_weight_norm", weight_v, weight_g, dim_i=dim) + + raise errors.SymbolicValueError( + "Unsupported: ONNX export of _weight_norm for tensor of unknown rank.", + weight_v, + ) + + +@_onnx_symbolic("aten::dim") +@_beartype.beartype +def dim(g: jit_utils.GraphContext, self): + """Implement the dim functionality available for a pytorch tensor in ONNX""" + # ONNX does not support dim directly in this opset so we can use 2 ops to get the info + shape = g.op("Shape", self) + return g.op("Size", shape) + + +@_onnx_symbolic("aten::__contains_") +@_beartype.beartype +def __contains_(g: jit_utils.GraphContext, self, element): + unpacked_list = symbolic_helper._unpack_list(self) + if all( + symbolic_helper._is_constant(x) for x in unpacked_list + ) and symbolic_helper._is_constant(element): + return g.op( + "Constant", + value_t=torch.tensor( + symbolic_helper._node_get(element.node(), "value") + in (symbolic_helper._node_get(x.node(), "value") for x in unpacked_list) + ), + ) + + raise errors.SymbolicValueError( + "Unsupported: ONNX export of __contains__ for non-constant list or element.", + self, + ) + + +@_onnx_symbolic("aten::__getitem_") +@_beartype.beartype +def __getitem_(g: jit_utils.GraphContext, self, i): + return select(g, self, g.op("Constant", value_t=torch.tensor([0])), i) + + +@_onnx_symbolic("aten::item") +@_beartype.beartype +def item(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("aten::take") +@_beartype.beartype +def take(g: jit_utils.GraphContext, self, index): 
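+    # aten::take indexes into self as if it were 1-D: flatten self, gather along
+    # dim 0 with the index tensor, then reshape the result to index's shape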
+ self_flattened = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + ) + out = index_select(g, self_flattened, 0, index) + out = reshape_as(g, out, index) + return out + + +@_beartype.beartype +def _kl_div_log_target_impl(g: jit_utils.GraphContext, input, target): + diff_ = sub(g, target, input) + exp_ = exp(g, target) + output = mul(g, exp_, diff_) + return output + + +@_beartype.beartype +def _kl_div_non_log_target_impl(g: jit_utils.GraphContext, input, target): + log_ = log(g, target) + diff_ = sub(g, log_, input) + output_pos = mul(g, target, diff_) + zeros_ = zeros_like(g, output_pos) + mask_ = gt(g, target, g.op("Constant", value_t=torch.tensor(0))) + output = where(g, mask_, output_pos, zeros_) + return output + + +@_onnx_symbolic("aten::kl_div") +@symbolic_helper.parse_args("v", "v", "i", "b") +@_beartype.beartype +def kl_div(g: jit_utils.GraphContext, input, target, reduction, log_target): + if log_target: + output = _kl_div_log_target_impl(g, input, target) + else: + output = _kl_div_non_log_target_impl(g, input, target) + + if reduction == 0: + return output + elif reduction == 1: + return g.op("ReduceMean", output, keepdims_i=0) + elif reduction == 2: + return symbolic_helper._reducesum_helper(g, output, keepdims_i=0) + else: + return symbolic_helper._onnx_unsupported( + "kl_div with reduction other than none, mean, or sum.", input + ) + + +@_onnx_symbolic("aten::mse_loss") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def mse_loss(g: jit_utils.GraphContext, input, target, reduction): + output = mul(g, sub(g, input, target), sub(g, input, target)) + if reduction == 0: + return output + elif reduction == 1: + return g.op("ReduceMean", output, keepdims_i=0) + elif reduction == 2: + return symbolic_helper._reducesum_helper(g, output, keepdims_i=0) + else: + return symbolic_helper._onnx_unsupported( + "mse_loss with reduction other than none, mean, or sum.", input + ) + + +@_onnx_symbolic("aten::as_strided") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v", "is", "i") +@_beartype.beartype +def as_strided(g: jit_utils.GraphContext, self, sizes, strides, offset=None): + sizes = symbolic_helper._maybe_get_const(sizes, "is") + rank = len(strides) + self_1d = symbolic_helper._reshape_helper( + g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + ) + ind: Optional[torch.Tensor] + if not symbolic_helper._is_value(sizes): + ind = torch.tensor([0], dtype=torch.long) + for i, (size, stride) in enumerate(zip(sizes, strides)): + r_size = [1] * rank + r_size[i] = -1 + ind = ind + torch.arange(size).view(r_size) * stride + if offset: + ind = ind + offset + return g.op("Gather", self_1d, g.op("Constant", value_t=ind)) + else: + ind = None + for i, stride in enumerate(strides): + r_size = [1] * rank + r_size[i] = -1 + size = select( + g, + sizes, + g.op("Constant", value_t=torch.tensor([0])), + g.op("Constant", value_t=torch.tensor(i)), + ) + tmp_ind = symbolic_helper._reshape_helper( + g, + arange(g, size, 4, None, None, None), + g.op("Constant", value_t=torch.tensor(r_size)), + ) + tmp_ind = g.op( + "Mul", tmp_ind, g.op("Constant", value_t=torch.tensor([stride])) + ) + if ind is None: + ind = tmp_ind + else: + ind = g.op("Add", ind, tmp_ind) + if offset: + ind = g.op("Add", ind, g.op("Constant", torch.tensor([offset]))) + return g.op("Gather", self_1d, ind) + + +@_onnx_symbolic("aten::__derive_index") +@_beartype.beartype +def __derive_index(g: jit_utils.GraphContext, 
index, start, step): + return g.op("Add", start, g.op("Mul", index, step)) + + +@_onnx_symbolic("aten::__range_length") +# Source code for aten op can be found here: pytorch/torch/csrc/jit/runtime/register_prim_ops.cpp +# if (step > 0 && lo < hi) { +# push(stack, 1 + (hi - 1 - lo) / step); +# } else if (step < 0 && lo > hi) { +# push(stack, 1 + (lo - 1 - hi) / (0 - step)); +# } else { +# push(stack, 0); +# } +@_beartype.beartype +def __range_length(g: jit_utils.GraphContext, lo, hi, step): + sub = g.op("Sub", hi, lo) + div = g.op("Ceil", true_divide(g, sub, step)) + return g.op("Cast", div, to_i=_C_onnx.TensorProtoDataType.INT64) + + +@_onnx_symbolic("aten::linear") +@_beartype.beartype +def linear(g: jit_utils.GraphContext, input, weight, bias): + rank = symbolic_helper._get_tensor_rank(input) + weight = t(g, weight) + if rank == 2 and not bias.node().mustBeNone(): + alpha = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + beta = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + output = addmm(g, bias, input, weight, alpha, beta) + else: + output = matmul(g, input, weight) + if not bias.node().mustBeNone(): + output = add(g, bias, output) + + return output + + +@_onnx_symbolic("aten::hann_window") +@symbolic_helper.parse_args("v", "b", "i", "v", "v", "v", "v") +@_beartype.beartype +def hann_window( + g: jit_utils.GraphContext, + window_length, + periodic=True, + dtype: Optional[int] = None, + layout=None, + device=None, + pin_memory=None, + requires_grad=False, +): + if dtype is None: + dtype_ = torch.get_default_dtype() + if not dtype_ or not dtype_.is_floating_point: + dtype_ = torch.float + scalar_type = _type_utils.JitScalarType.from_dtype(dtype_) + else: + scalar_type = _type_utils.JitScalarType(dtype) + + n_array = arange(g, window_length, 4, None, None, None) + output = g.op("Cast", n_array, to_i=_C_onnx.TensorProtoDataType.FLOAT) + output = mul( + g, g.op("Constant", value_t=torch.tensor(math.pi, dtype=torch.float)), output + ) + + if periodic is False: + window_length = sub( + g, window_length, g.op("Constant", value_t=torch.tensor(1, dtype=torch.int)) + ) + output = div(g, output, window_length) + output = g.op( + "Cast", + square(g, sin(g, output)), + to_i=scalar_type.onnx_type(), + ) + + return output + + +@_onnx_symbolic("aten::mv") +@_beartype.beartype +def mv(g: jit_utils.GraphContext, self, vec): + return matmul(g, self, vec) + + +@_onnx_symbolic("aten::dot") +@_beartype.beartype +def dot(g: jit_utils.GraphContext, self, other): + return matmul(g, self, other) + + +@_onnx_symbolic("aten::movedim") +@symbolic_helper.parse_args("v", "t", "t") +@_beartype.beartype +def movedim(g: jit_utils.GraphContext, self, source, destination): + # This is a pythonic implementation mostly taken from aten/src/ATen/native/TensorShape.cpp::movedim + source = source.view(-1) + destination = destination.view(-1) + + assert source.size() == destination.size() + + if (source == destination).all(): + return self + + self_rank = symbolic_helper._get_tensor_rank(self) + assert self_rank is not None + + perm = list(range(self_rank)) + + src_dims = perm.copy() + dst_dims = perm.copy() + + for src, dst in zip(source.tolist(), destination.tolist()): + perm[dst] = src + src_dims[src] = -1 + dst_dims[dst] = -1 + + src_dims = [dim for dim in src_dims if dim != -1] + dst_dims = [dim for dim in dst_dims if dim != -1] + + for src, dst in zip(src_dims, dst_dims): + perm[dst] = src + + return g.op("Transpose", self, perm_i=perm) + + +@_onnx_symbolic("aten::fill") 
+@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def fill(g: jit_utils.GraphContext, self, value): + scalar_type = _type_utils.JitScalarType.from_value( + self, _type_utils.JitScalarType.FLOAT + ) + return full_like(g, self, value, scalar_type) + + +@_onnx_symbolic("aten::index_add") +@_beartype.beartype +def index_add(g: jit_utils.GraphContext, self, dim, index, other, alpha=None): + warnings.warn( + "Warning: ONNX export does not support duplicated values in 'index' field, " + + "this will cause the ONNX model to be incorrect." + ) + + # ONNX does not support "alpha" argument, unlike aten index_add + # See: https://github.com/pytorch/pytorch/pull/65993#issuecomment-953151102 for more context + if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1: + return symbolic_helper._unimplemented("index_add", "alpha != 1", self) + + dim = symbolic_helper._maybe_get_const(dim, "i") + if dim is None: + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting 'index_add_()' function with " + "unknown 'dim' value.", + self, + ) + + self_dim_rank = symbolic_helper._get_tensor_rank(self) + other_dim_rank = symbolic_helper._get_tensor_rank(other) + + if self_dim_rank is None or other_dim_rank is None: + raise errors.SymbolicValueError( + "ONNX export does NOT support exporting 'index_add_()' function while " + "the rank of self tensor or tensor to be added is unknown.", + self, + ) + + if other_dim_rank != self_dim_rank: + delta = self_dim_rank - other_dim_rank + for i in range(delta): + other = symbolic_helper._unsqueeze_helper( + g, other, [symbolic_helper._get_tensor_rank(other)] + ) + + other_dim_size = symbolic_helper._get_tensor_dim_size(other, dim) + self_dim_size = symbolic_helper._get_tensor_dim_size(self, dim) + + if (other_dim_size is not None) and (self_dim_size is not None): + if other_dim_size > self_dim_size: + raise errors.SymbolicValueError( + "ONNX export does not support exporting 'index_add_()' function with " + "duplicated values in 'index' parameter yet.", + self, + ) + + # Construct a new shape. It's almost as same as self except the size of the 'dim' + # dimension is 1, so that we can expand other dimensions as expected. 
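+    # Illustrative example (values not from the original source): if `self` has shape (4, 5, 6)
+    # and dim == 1, the slice below yields a tensor of shape (4, 1, 6); expand_as then broadcasts
+    # `other` along every dimension except `dim` before the scatter_add.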
+ new_shape_axes = list(range(self_dim_rank)) + new_shape_starts = [0 for i in range(self_dim_rank)] + new_shape_ends = [sys.maxsize if (i != dim) else 1 for i in range(self_dim_rank)] + + new_shape = symbolic_helper._slice_helper( + g, self, axes=new_shape_axes, starts=new_shape_starts, ends=new_shape_ends + ) + other = expand_as(g, other, new_shape) + + for i in range(dim): + index = symbolic_helper._unsqueeze_helper(g, index, [0]) + + for i in range(self_dim_rank - dim - 1): + index = symbolic_helper._unsqueeze_helper( + g, index, [symbolic_helper._get_tensor_rank(index)] + ) + + return scatter_add(g, self, dim, expand_as(g, index, other), other) + + +@_onnx_symbolic("aten::roll") +@symbolic_helper.parse_args("v", "is", "is") +@_beartype.beartype +def roll(g: jit_utils.GraphContext, self, shifts, dims): + assert len(shifts) == len(dims) + + result = self + for i in range(len(shifts)): + shapes = [] + shape = symbolic_helper._slice_helper( + g, result, axes=[dims[i]], starts=[-shifts[i]], ends=[sys.maxsize] + ) + shapes.append(shape) + shape = symbolic_helper._slice_helper( + g, result, axes=[dims[i]], starts=[0], ends=[-shifts[i]] + ) + shapes.append(shape) + result = g.op("Concat", *shapes, axis_i=dims[i]) + + return result + + +@_onnx_symbolic("aten::cross") +@symbolic_helper.parse_args("v", "v", "i") +@_beartype.beartype +def cross(g: jit_utils.GraphContext, input, other, dim=None): + dim = symbolic_helper._get_dim_for_cross(input, dim) + # If we have two tensors such that + # A = [a, b, c], B = [d, e, f], we permute the tensor such that we have + # After first roll, + # A' = [b, c, a], B' = [f, d, e], so that we calculate (b*f, c*d, a*e) + roll_x_1 = roll(g, input, [2], [dim]) + roll_y_1 = roll(g, other, [1], [dim]) + # After second roll, + # A' = [c, a, b], B' = [e, f, d], so that we calculate (c*e, a*f, b*d) + roll_x_2 = roll(g, input, [1], [dim]) + roll_y_2 = roll(g, other, [2], [dim]) + # cross product is calculated as + # result = [(b*f - c*e), (c*d - a*f), (a*e - b*d)] + return sub(g, mul(g, roll_x_1, roll_y_1), mul(g, roll_x_2, roll_y_2)) + + +@_onnx_symbolic("aten::cdist") +@_beartype.beartype +def cdist( + g: jit_utils.GraphContext, + x1, + x2, + p=2.0, + compute_mode="use_mm_for_euclid_dist_if_necessary", +): + # X1.shape = (B * P * D), X2.shape = (B * R * D) + # In order to respect numpy style broadcasting as demonstrated in + # https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md + # we unsqueeze both input tensors + # Currently we ignore the 'compute_mode' variable as we use default to + # using matrix multiplication to calculate the euclidean distance + rank = symbolic_helper._get_tensor_rank(x1) + assert rank is not None + broadcasted_x1 = symbolic_helper._unsqueeze_helper(g, x1, [rank - 1]) + broadcasted_x2 = symbolic_helper._unsqueeze_helper(g, x2, [rank - 2]) + return pairwise_distance( + g, broadcasted_x1, broadcasted_x2, p, eps=1e-06, keepdim=False + ) + + +@_onnx_symbolic("aten::lerp") +@_beartype.beartype +def lerp(g: jit_utils.GraphContext, self, end, weight): + # Conditional for better numeric. 
This has been discussed in + # https://github.com/pytorch/pytorch/pull/18871 + diff = g.op("Sub", end, self) + return where( + g, + g.op("Less", weight, g.op("Constant", value_t=torch.tensor(0.5))), + g.op("Add", self, g.op("Mul", weight, diff)), + g.op( + "Sub", + end, + g.op( + "Mul", + diff, + g.op("Sub", g.op("Constant", value_t=torch.tensor(1.0)), weight), + ), + ), + ) + + +@_onnx_symbolic("aten::broadcast_tensors") +@_beartype.beartype +def broadcast_tensors(g: jit_utils.GraphContext, self): + all_tensors = symbolic_helper._unpack_list(self) + t_with_final_shape = zeros_like(g, all_tensors[0]) + + # Add operator supports multidirectional broadcasting. So we leverage this function + # to infer the final shape generated by the broadcast. + for t in all_tensors: + t_with_final_shape = add(g, t_with_final_shape, t) + + t_list = [expand_as(g, t, t_with_final_shape) for t in all_tensors] + return g.op("prim::ListConstruct", *t_list) + + +@_onnx_symbolic("aten::is_pinned") +def is_pinned(g: jit_utils.GraphContext, self, device=None): + # Unused by ONNX. + return None + + +@_onnx_symbolic("prim::ConstantSplit") +@_beartype.beartype +def prim_constant_split(g: jit_utils.GraphContext, self, split_size, dim): + size = symbolic_helper._get_tensor_dim_size(self, dim) + if size is None: + return symbolic_helper._unimplemented( + "prim::ConstantSplit", "unknown dimension size", self + ) + splits = [split_size] * (size // split_size) + leftover = size % split_size + if leftover: + splits.append(leftover) + return g.op("Split", self, split_i=splits, axis_i=dim, outputs=len(splits)) + + +# TODO: It would be better to export this as a chunk directly, as this is +# less sensitive to changes in input size. +# TODO: Once we have proper scoping, stop reimplementing chunk, delete this +# method, and use the desugared version +@_onnx_symbolic("prim::ConstantChunk") +@_beartype.beartype +def prim_constant_chunk(g: jit_utils.GraphContext, self, chunks, dim): + dim_size = symbolic_helper._get_tensor_dim_size(self, dim) + if dim_size is None: + return symbolic_helper._unimplemented( + "prim::ConstantChunk", "unknown dimension size", self + ) + split_size = (dim_size + chunks - 1) // chunks + return prim_constant_split(g, self, split_size, dim) + + +@_onnx_symbolic("prim::shape") +@_beartype.beartype +def prim_shape(g: jit_utils.GraphContext, self): + return g.op("Shape", self) + + +@_onnx_symbolic("prim::max") +@_beartype.beartype +def prim_max(g: jit_utils.GraphContext, self, other): + return _op_with_optional_float_cast(g, "Max", self, other, opset_before=12) + + +@_onnx_symbolic("prim::min") +@_beartype.beartype +def prim_min(g: jit_utils.GraphContext, self, other=None): + if not other: + if symbolic_helper._is_packed_list(self): + self = stack(g, self, g.op("Constant", value_t=torch.tensor([0]))) + return min(g, self) + return min(g, self, other) + + +@_onnx_symbolic("prim::data") +@_beartype.beartype +def prim_data(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("prim::layout") +def prim_layout(g: jit_utils.GraphContext, self): + # Always return 'torch.strided'. Other layout types are not supported by JIT 'TensorType'. + # Layout class defined in 'c10/core/Layout.h'. 
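+    # In that enum, Strided is the first entry, so the constant 0 emitted below corresponds to torch.strided.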
+ return g.op("Constant", value_t=torch.tensor(0)) + + +@_onnx_symbolic("prim::ListConstruct") +@_beartype.beartype +def prim_list_construct(g: jit_utils.GraphContext, *inputs, **kwargs): + return None + + +@_onnx_symbolic("prim::ListUnpack") +@_beartype.beartype +def prim_list_unpack( + g: jit_utils.GraphContext, *inputs, **kwargs +) -> Optional[List[_C.Value]]: + if len(inputs) == 1 and inputs[0].node().kind() == "prim::ListConstruct": + # Cancel the previous node if it is ListConstruct by returning its inputs + # TODO(justinchuby): Use a public method in the helper module + return symbolic_helper._unpack_list(inputs[0]) + + return None + + +@_onnx_symbolic("prim::TupleConstruct") +@_beartype.beartype +def prim_tuple_construct(g: jit_utils.GraphContext, *inputs, **kwargs): + return None + + +@_onnx_symbolic("prim::Uninitialized") +@_beartype.beartype +def prim_uninitialized(g: jit_utils.GraphContext, *inputs, **kwargs): + return None + + +# exists to refine the type of the Value +# if x is an optional Tensor, unchecked_cast will cast +# x to Tensor, so the rest of the graph knows that x is a Tensor +# this doesn't do anything in runtime and is a noop in ONNX +@_onnx_symbolic("prim::unchecked_cast") +@_beartype.beartype +def prim_unchecked_cast(g: jit_utils.GraphContext, self): + return self + + +@_onnx_symbolic("prim::dtype") +@_beartype.beartype +def prim_dtype(g: jit_utils.GraphContext, self): + scalar_type = symbolic_helper._try_get_scalar_type(self) + if scalar_type is None: + scalar_type = _type_utils.JitScalarType.FLOAT + # This node records a torch dtype as int + return g.op("Constant", value_t=torch.tensor(scalar_type)) + + +@_onnx_symbolic("prim::tolist") +@_beartype.beartype +def prim_tolist(g: jit_utils.GraphContext, input, dim_val, elem_ty_val): + """tolist is currently supported only for 1D input tensors. + + dim_val and elem_ty_val represent dimension and type annotations + that need to match dimension and type of the input tensor. 
+ """ + dim = symbolic_helper._maybe_get_const(dim_val, "i") + if dim > 1: + return symbolic_helper._unimplemented("prim::tolist", "dim_val > 1", input) + return input + + +# ----------------------------------------------------------------------------- +# Symbolic functions that need extra context +# ----------------------------------------------------------------------------- +@_onnx_symbolic("prim::device") +@_beartype.beartype +def prim_device(g: jit_utils.GraphContext, *inputs, **kwargs) -> None: + output_type = g.original_node.output().type() + if isinstance(output_type, _C.DeviceObjType): + return None + + return symbolic_helper._unimplemented( + "prim::device", + f"output type should be 'DeviceObjType', not '{output_type.kind()}'", + g.original_node.output(), + ) + + +@_onnx_symbolic("prim::Loop") +@_beartype.beartype +def prim_loop(g: jit_utils.GraphContext, *inputs, **attrs) -> List[_C.Value]: + node = g.original_node + env = g.env + params_dict = g.params_dict + + operator_export_type = GLOBALS.operator_export_type + opset_version = GLOBALS.export_onnx_opset_version + + old_blocks = tuple(node.blocks()) + new_op_outputs, new_block_contexts, new_node = jit_utils.add_op_with_blocks( + g, "Loop", *inputs, outputs=node.outputsSize(), n_blocks=len(old_blocks) + ) + + for old_block, new_block_context in zip(old_blocks, new_block_contexts): + # Copy input metadata to subblock + # + # prim::Loop(iter, cond, input_1, ..., input_n) + # block0(iter, input_1, ..., input_n) + # + # For `Loop` node, copy metadata for `iter`, `input_1`, ..., `input_n`. + for i, b_in in enumerate(old_block.inputs()): + if i == 0 and i < len(inputs): + b_in.setType(inputs[i].type()) + # For optional block inputs, they may switch between None not-None inside + # the loop body, so if the loop input is not optional, the block input may + # still need to be optional. + if ( + i > 0 + and (i + 1) < len(inputs) + and not isinstance(b_in.type(), _C.OptionalType) + ): + b_in.setType(inputs[i + 1].type()) + torch._C._jit_pass_onnx_block( + old_block, + new_block_context.block, + operator_export_type, + env, + False, + ) + fixed_outputs = torch._C._jit_pass_fixup_onnx_controlflow_node( + new_node, opset_version + ) + # Run shape type inference for Loop after subblock is converted. + if GLOBALS.onnx_shape_inference: + torch._C._jit_pass_onnx_node_shape_type_inference( + new_node, params_dict, opset_version + ) + return fixed_outputs + + +@_onnx_symbolic("prim::If") +@_beartype.beartype +def prim_if(g: jit_utils.GraphContext, *inputs, **attrs) -> List[_C.Value]: + n = g.original_node + block = g.block + env = g.env + params_dict = g.params_dict + + operator_export_type = GLOBALS.operator_export_type + opset_version = GLOBALS.export_onnx_opset_version + + static_if = inputs[0].node().kind() == "onnx::Constant" + if static_if: + # Fold static if + # + # The torch IR + # graph(%embedding_matrix.1 : Float(10, 15, strides=[15, 1], requires_grad=0, device=cpu), + # %input.1 : Long(6, strides=[1], requires_grad=0, device=cpu), ... 
+ # %65 : Bool(requires_grad=0, device=cpu) = prim::Constant[value={0}]() + # %21 : Long(device=cpu) = aten::eq(%20, %64) + # %22 : Long(device=cpu) = prim::If(%21) + # block0(): + # %23 : Long(device=cpu) = aten::is_floating_point(%input.1) + # -> (%23) + # block1(): + # -> (%65) + # %input.53 : Tensor, %weight : Tensor = prim::If(%22) + # block0(): + # -> (%embedding_matrix.1, %input.1) + # block1(): + # -> (%input.1, %embedding_matrix.1) + # %26 : int[] = aten::size(%input.53) + # + # The converted ONNX graph + # %10 : Bool(device=cpu) = onnx::Constant[value={0}]() + # %14 : Bool(device=cpu) = onnx::Equal(%13, %8) + # %15 : Bool(requires_grad=0, device=cpu) = onnx::Constant[value={0}]() + # %16 : Long(1, strides=[1], device=cpu) = onnx::Shape(%input.1) + input_flag = symbolic_helper._node_get(inputs[0].node(), "value").tolist() + const_value = ( + all(input_flag) if isinstance(input_flag, list) else bool(input_flag) + ) + block_idx = 0 if const_value else 1 + current_b = list(n.blocks())[block_idx] + env = torch._C._jit_pass_onnx_block( + current_b, + block, + operator_export_type, + env, + True, + ) + if_output_list = list(n.outputs()) + current_b_list = list(current_b.outputs()) + + final_b_list = [] + for idx in range(len(if_output_list)): + if current_b_list[idx] not in env: + raise errors.SymbolicValueError( + f"The sub block ATen output {current_b_list[idx]} is not in env.", + current_b_list[idx], + ) # type:ignore[operator] + onnx_b = env[current_b_list[idx]] + final_b_list.append(onnx_b) + return final_b_list + else: + old_blocks = tuple(n.blocks()) + new_op_outputs, new_block_contexts, new_node = jit_utils.add_op_with_blocks( + g, "If", *inputs, outputs=n.outputsSize(), n_blocks=len(old_blocks) + ) + + for old_block, new_block_context in zip(old_blocks, new_block_contexts): + torch._C._jit_pass_onnx_block( + old_block, + new_block_context.block, + operator_export_type, + env, + False, + ) + fixed_outputs = torch._C._jit_pass_fixup_onnx_controlflow_node( + new_node, opset_version + ) + # Run shape type inference for If after subblock is converted. + if GLOBALS.onnx_shape_inference: + torch._C._jit_pass_onnx_node_shape_type_inference( + new_node, params_dict, opset_version + ) + return fixed_outputs + + +@_onnx_symbolic("prim::Constant") +@_beartype.beartype +def prim_constant(g: jit_utils.GraphContext, *inputs, **attrs): + node = g.original_node + + if node.mustBeNone(): + return None + # This must go before checking for string values, because some device constants + # have string values, but we want to keep them as unconverted Device types so + # that eq() can work on them. + if isinstance(node.output().type(), _C.DeviceObjType): + return None + if node.kindOf("value") == "t": + return g.op("Constant", value_t=symbolic_helper._node_get(node, "value")) + if node.kindOf("value") == "s": + return g.op("Constant", value_s=symbolic_helper._node_get(node, "value")) + if node.output().type().isSubtypeOf( + _C.ListType.ofInts() + ) or node.output().type().isSubtypeOf(_C.ListType.ofFloats()): + return g.op( + "Constant", value_t=torch.tensor(symbolic_helper._node_get(node, "value")) + ) + if node.output().type().isSubtypeOf(_C.ListType.ofStrings()): + str_constants = [ + g.op("Constant", value_s=s) + for s in symbolic_helper._node_get(node, "value") + ] + return g.op("prim::ListConstruct", *str_constants) + + raise errors.SymbolicValueError( + f"Unsupported prim::Constant kind: '{node.kindOf('value')}'. 
" + f"Please send a bug report at {_constants.PYTORCH_GITHUB_ISSUES_URL}.", + node.output(), + ) + + +@_onnx_symbolic("prim::type") +@_beartype.beartype +def prim_type(g: jit_utils.GraphContext, device_value: _C.Value, *args, **kwargs): + if device_value.node().kind() == "prim::device": + device = jit_utils.get_device_from_value(device_value.node().input()) + if device is not None: + return g.op("Constant", value_s=str(device)) + + return symbolic_helper._unimplemented( + "prim::type", + "Device type cannot be statically determined.", + device_value, + ) + + +@_onnx_symbolic("onnx::Placeholder") +@_beartype.beartype +def onnx_placeholder(g: jit_utils.GraphContext, *inputs, **attrs): + node = g.original_node + block = g.block + env = g.env + + return torch._C._jit_onnx_convert_pattern_from_subblock(block, node, env) + + +@_onnx_symbolic("aten::resolve_conj") +@_onnx_symbolic("aten::resolve_neg") +@_beartype.beartype +def noop_complex_operators(g: jit_utils.GraphContext, input: _C.Value): + # ONNX does not have operators to *directly* manipulate real/imaginary components + # However, a few torch APIs (e.g. .tolist()) use complex operations when input is real, + # which results in failures due to missing operators for complex numbers + + # `aten::resolve_conj` and `aten::resolve_neg` can safely be implemented as no-op + return input + + +@_onnx_symbolic("aten::_conj") +@_onnx_symbolic("aten::conj_physical") +@_beartype.beartype +def unsupported_complex_operators(g: jit_utils.GraphContext, input: _C.Value): + # ONNX does not have operators to *directly* manipulate real/imaginary components + # However, a few torch APIs (e.g. .tolist()) use complex operations when input is real, + # which results in failures due to missing operators for complex numbers + + # While `aten::_conj` and `aten::conj_physical` raise exception when input is complex + if symbolic_helper.is_complex_value(input): + # FIXME(justinchuby): report correct name for symbolic being executed + return symbolic_helper._onnx_unsupported( + "aten::_conj, aten::conj_physical", + input, + ) + + # they can safely be implemented as no-op for real numbers only + return noop_complex_operators(g, input) + + +@_onnx_symbolic("aten::logit") +@_beartype.beartype +def logit(g: jit_utils.GraphContext, self: torch._C.Value, eps: torch._C.Value): + one = g.op("Constant", value_t=torch.tensor(1.0)) + + if not symbolic_helper._is_none(eps): + eps = g.op( + "Cast", eps, to_i=_type_utils.JitScalarType.from_value(self).onnx_type() + ) + one_sub_eps = g.op("Sub", one, eps) + self_less_equal_one_sub_eps = g.op("Greater", one_sub_eps, self) + temporary_self = g.op("Where", self_less_equal_one_sub_eps, self, one_sub_eps) + + temporary_self_less_eps = g.op("Less", temporary_self, eps) + z = g.op("Where", temporary_self_less_eps, eps, temporary_self) + else: + z = self + + sub = g.op("Sub", one, z) + div = g.op("Div", z, sub) + return g.op("Log", div) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..282cfb04d36cc5f3457d5d5835047dd3d5418597 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0314884681d63f7ed2b43d3f04f4b7ca6949f19a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8a9af9e5991e8eceeb0c995a0f8dd4fc63fdbf2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db152c76bbd96075ca12a6e83ea8d78661f757e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..897b81340c9bac667ebc5198e9587ae35534243d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53234e64fc850fbcd950eb4e87fcd1e666fe3113 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a9c055c66a2a44ec338f7dd1f69d4186b12c97e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bbc42f8d17033c1b3c7be4305cdfce090d28f66 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d205b64d72578a57073c8525798ce0d861ac5f1c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1d0bf46f75fc8582975245b7c460137c04c20b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e303b194a06a0f66c32e6e5bc3d6e3e5a74aae12 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84008dc4d8bd88d856af7463395dbd6ad70b4df1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa45e4f28ce0bcc939930781a29d92040df10a2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py @@ -0,0 +1,98 @@ +# mypy: ignore-errors + +import sys +from functools import wraps, partial + +import torch +import torch.distributed as dist +from torch.distributed import rpc +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + TEST_SKIPS, + tp_transports, +) + +TEST_GPU_NUM = 4 + +class ShardedTensorTestBase(MultiProcessTestCase): + @property + def world_size(self): + return TEST_GPU_NUM + + def init_pg(self, backend="nccl"): + if backend not in ["nccl", "gloo", "mpi"]: + raise RuntimeError(f"Backend {backend} not supported!") + + dist.init_process_group( + backend=backend, + world_size=self.world_size, + rank=self.rank, + init_method=f"file://{self.file_name}", + ) + + # set device for nccl pg for collectives + if backend == "nccl": + torch.cuda.set_device(self.rank) + + + def init_rpc(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports()) + rpc_backend_options.init_method = f"file://{self.file_name}" + for rank in range(self.world_size): + rpc_backend_options.set_device_map( + f"worker{rank}", {rank: 
self.rank, self.rank: rank} + ) + + rpc.init_rpc( + name="worker%d" % self.rank, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + def init_comms(self, init_rpc=True, backend="nccl"): + if init_rpc: + self.init_rpc() + self.init_pg(backend=backend) + + def destroy_comms(self, destroy_rpc=True): + # Wait for all ranks to reach here before starting shutdown. + dist.barrier() + + if destroy_rpc: + rpc.shutdown() + dist.destroy_process_group() + + def setUp(self) -> None: + super().setUp() + self._spawn_processes() + + def assert_sharded_tensor_equal(self, st1, st2): + st1_local_shards = st1.local_shards() + st2_local_shards = st2.local_shards() + self.assertEqual(len(st1_local_shards), len(st2_local_shards)) + for i, st1_local_shard in enumerate(st1_local_shards): + self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor) + self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata) + + self.assertEqual(st1.metadata(), st2.metadata()) + self.assertEqual(st1.sharding_spec(), st2.sharding_spec()) + self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards())) + +# wrapper to initialize comms (processgroup + rpc) +def with_comms(func=None, init_rpc=True, backend="nccl"): + if func is None: + return partial( + with_comms, + init_rpc=init_rpc, + backend=backend, + ) + + @wraps(func) + def wrapper(self, *args, **kwargs): + if backend == "nccl" and torch.cuda.device_count() < self.world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + self.init_comms(init_rpc=init_rpc, backend=backend) + func(self, *args, **kwargs) + self.destroy_comms(destroy_rpc=init_rpc) + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d028922b58ac95408e188c8fcf96f8e773a17e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1b7f5178bf42c181b8f5fda456f6ec79629749c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a98572534f8905ad96b56cc9afeaa1c889f27ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py new file mode 100644 index 0000000000000000000000000000000000000000..a3fe0584d75bd43a6f023b6ec8bddef2e7acba42 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py @@ -0,0 +1,136 @@ +# mypy: ignore-errors + +import builtins + +import torch +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, + EnumerableShardingSpec, + ShardMetadata, +) +from torch.distributed._shard.sharding_spec._internals import ( + get_chunked_dim_size, + get_split_size, +) + + +def generate_chunk_sharding_specs_for_test(sharding_dim): + return [ + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ), + # Test different ordering. (Case 1) + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:2/cuda:2", + "rank:3/cuda:3", + "rank:0/cuda:0", + "rank:1/cuda:1", + ], + ), + # Test different ordering. (Case 2) + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:3/cuda:3", + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + ], + ), + ] + + +def generate_enumerable_sharding_specs_for_test(): + return [ + EnumerableShardingSpec( + [ + ShardMetadata( + shard_offsets=[0, 0], + shard_sizes=[5, 5], + placement="rank:0/cuda:0", + ), + ShardMetadata( + shard_offsets=[5, 0], + shard_sizes=[5, 5], + placement="rank:1/cuda:1", + ), + ShardMetadata( + shard_offsets=[0, 5], + shard_sizes=[5, 5], + placement="rank:2/cuda:2", + ), + ShardMetadata( + shard_offsets=[5, 5], + shard_sizes=[5, 5], + placement="rank:3/cuda:3", + ), + ] + ) + ] + + +def generate_local_weight_sharding_params_for_test( + local_weight, sharded_dim, gpu_num, spec, rank +): + """ + Shard the local weight based the given spec, so we can compare against + the one from sharded tensor. + + Args: + local_weight: weight matrix to be sharded. + sharded_dim: The dimension which we shard on. + gpu_num: number of ranks. + spec: sharding spec. + rank: # of cuda process. + + Returns: + start_pos: start position of sharded weight on the given rank. + chunk_size: chunk size of sharded weight on the given rank. + """ + sharding_dim_size = local_weight.size(sharded_dim) + split_size = get_split_size(sharding_dim_size, gpu_num) + current_offsets = 0 + start_pos = current_offsets + for idx, placement in enumerate(spec.placements): + chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) + if rank == placement.rank(): + start_pos = current_offsets + break + current_offsets += chunk_size + return start_pos, chunk_size + + +def clone_module_parameter(module, param_name): + """ + Clone a parameter from a given existing module. + + Args: + module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned. + param_name (str): Name of the parameter of ``module`` that needs to be cloned. + + Returns: cloned tensor as :class:`torch.nn.Parameter`. 
+ """ + tensor = getattr(module, param_name) + return torch.nn.Parameter(tensor.detach().clone()) + +def gen_binary_op_func(python_op, inplace=False): + src_lines = ['def f(lhs, rhs):'] + if "torch" in python_op: + src_lines.append(f' return {python_op}(lhs, rhs)\n') + elif inplace: + src_lines.append(f' lhs {python_op}= rhs\n return lhs\n') + else: + src_lines.append(f' return lhs {python_op} rhs\n') + + code_str = '\n'.join(src_lines) + g = {'torch': torch} + builtins.exec(code_str, g) + return g["f"] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9595dc462c7a39949b2176895c9df2d6dfdaadbf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3df7a83f6f5d463257abaee40327832a8df3fcaf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py new file mode 100644 index 0000000000000000000000000000000000000000..41e72c6e54c790a01882ec40ffb193af6d6f999a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py @@ -0,0 +1,486 @@ +# mypy: ignore-errors + +# Copyright (c) Meta Platforms, Inc. and affiliates + +import itertools +from dataclasses import dataclass +import sys +from functools import wraps +from typing import ( + Any, + Callable, + Iterator, + Tuple, + Dict, + List, + Sequence, + TypeVar, + cast, +) + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F + +from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + TEST_SKIPS, + skip_if_lt_x_gpu, +) + + +from torch.distributed._tensor import ( + DeviceMesh, + Shard, + Replicate, + distribute_tensor, +) +from torch.distributed._tensor.placement_types import Placement + +DEVICE_TYPE = "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else "cpu" +PG_BACKEND = "nccl" if DEVICE_TYPE == "cuda" else "gloo" + +NUM_DEVICES = 4 + +# We use this as a proxy for "multiple GPUs exist" +if torch.cuda.is_available() and torch.cuda.device_count() > 1: + # when we actually have multiple GPUs, relax the requirement to smaller counts. 
+ NUM_DEVICES = min(NUM_DEVICES, torch.cuda.device_count()) + +T = TypeVar("T") + + +# simple RMSNorm layer for testing +class RMSNormPython(torch.nn.Module): + def __init__(self, dim: int, eps: float = 1e-6): + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.ones(dim)) + + def _norm(self, x): + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + output = self._norm(x) + return output * self.weight + +class MLPModule(nn.Module): + def __init__(self, device): + super().__init__() + torch.manual_seed(5) + self.net1 = nn.Linear(10, 16, device=device) + self.relu = nn.ReLU() + self.net2 = nn.Linear(16, 10, device=device) + + def forward(self, x): + return self.net2(self.relu(self.net1(x))) + + def reset_parameters(self): + self.net1.reset_parameters() + self.net2.reset_parameters() + + +@dataclass +class ModelArgs: + n_layers: int = 2 + vocab_size: int = 16 + max_seq_len: int = 16 + dim: int = 8 + n_heads: int = 4 + dropout_p: float = 0.1 + use_attn_mask: bool = True + weight_tying: bool = True + checkpoint_activations: bool = False + +class Attention(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + assert args.dim % args.n_heads == 0 + self.head_dim = args.dim // args.n_heads + self.n_heads = args.n_heads + self.dropout_p = args.dropout_p + self.resid_dropout = nn.Dropout(args.dropout_p) + self.use_attn_mask = args.use_attn_mask + + self.wq = nn.Linear(args.dim, args.dim, bias=False) + self.wk = nn.Linear(args.dim, args.dim, bias=False) + self.wv = nn.Linear(args.dim, args.dim, bias=False) + self.wo = nn.Linear(args.dim, args.dim, bias=False) + + def forward(self, x): + bsz, seq_len, _ = x.size() + queries, keys, values = self.wq(x), self.wk(x), self.wv(x) + queries = queries.view(bsz, seq_len, self.n_heads, self.head_dim) + keys = keys.view(bsz, seq_len, self.n_heads, self.head_dim) + values = values.view(bsz, seq_len, self.n_heads, self.head_dim) + + queries = queries.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + keys = keys.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + values = values.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + + output = F.scaled_dot_product_attention( + queries, keys, values, None, + self.dropout_p if self.training else 0, + self.use_attn_mask, + ) + output = output.transpose(1, 2).contiguous().view(bsz, seq_len, -1) + return self.resid_dropout(self.wo(output)) + +class FeedForward(nn.Module): + def __init__(self, dim, hidden_dim, dropout_p): + super().__init__() + self.w1 = nn.Linear(dim, hidden_dim) + self.gelu = nn.GELU() + self.w2 = nn.Linear(hidden_dim, dim) + self.resid_dropout = nn.Dropout(dropout_p) + + def forward(self, x): + return self.resid_dropout(self.w2(self.gelu(self.w1(x)))) + +class TransformerBlock(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + self.attention_norm = nn.LayerNorm(args.dim) + self.attention = Attention(args) + self.ffn_norm = nn.LayerNorm(args.dim) + self.feed_forward = FeedForward(args.dim, hidden_dim=4 * args.dim, dropout_p=args.dropout_p) + + def forward(self, x): + h = x + self.attention(self.attention_norm(x)) + out = h + self.feed_forward(self.ffn_norm(h)) + return out + +# A toy transformer model, partly inspired by the nanoGPT model: +# https://github.com/karpathy/nanoGPT. 
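+# A rough usage sketch (illustrative, not part of the original file), assuming the ModelArgs
+# defaults defined above (n_layers=2, dim=8, n_heads=4, vocab_size=16, max_seq_len=16):
+#
+#   args = ModelArgs()
+#   model = Transformer(args)
+#   tokens = torch.randint(0, args.vocab_size, (2, args.max_seq_len))
+#   logits = model(tokens)  # shape: (2, max_seq_len, vocab_size)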
+class Transformer(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + assert args.vocab_size is not None + assert args.max_seq_len is not None + self.max_seq_len = args.max_seq_len + self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim) + self.pos_embeddings = nn.Embedding(args.max_seq_len, args.dim) + self.dropout = nn.Dropout(args.dropout_p) + self.layers = nn.ModuleList() + for _ in range(args.n_layers): + self.layers.append(TransformerBlock(args)) + self.norm = nn.LayerNorm(args.dim) + self.output = nn.Linear(args.dim, args.vocab_size, bias=False) + if args.weight_tying: + self.output.weight = self.tok_embeddings.weight + self.checkpoint_activations = args.checkpoint_activations + + def forward(self, tokens): + _bsz, seq_len = tokens.size() + assert seq_len <= self.max_seq_len + h = self.tok_embeddings(tokens) + pos = torch.arange(0, seq_len, device=tokens.device) + p = self.pos_embeddings(pos) # positional embeddings of shape (seq_len, dim) + h = h + p + h = self.dropout(h) + for layer in self.layers: + if self.checkpoint_activations: + h = torch.utils.checkpoint.checkpoint(layer, h, use_reentrant=False) + else: + h = layer(h) + h = self.norm(h) + output = self.output(h).float() + return output + + +def skip_unless_torch_gpu(method: T) -> T: + """ + Test decorator which skips the test unless there's a GPU available to torch. + + >>> # xdoctest: +SKIP + >>> @skip_unless_torch_gpu + >>> def test_some_method(self) -> None: + >>> ... + """ + # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set. + return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method)) + + +class DTensorTestBase(MultiProcessTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def backend(self) -> str: + return PG_BACKEND + + def build_device_mesh(self) -> DeviceMesh: + return DeviceMesh(DEVICE_TYPE, list(range(self.world_size))) + + def init_pg(self) -> None: + if "nccl" in self.backend and torch.cuda.device_count() < self.world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + + if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl"]: + raise RuntimeError(f"Backend {self.backend} not supported!") + + dist.init_process_group( + backend=self.backend, + world_size=self.world_size, + rank=self.rank, # pyre-ignore[16] + init_method=f"file://{self.file_name}", # pyre-ignore[16] + ) + + # set device for nccl pg for collectives + if "nccl" in self.backend: + torch.cuda.set_device(self.rank) + + def destroy_pg(self) -> None: + # Wait for all ranks to reach here before starting shutdown. + # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895 + # dist.all_reduce(torch.zeros((1,), device="cuda" if torch.cuda.is_available() else "cpu")) + # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs: + # test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion + dist.barrier() + dist.destroy_process_group() + + def setUp(self) -> None: + super().setUp() + self._spawn_processes() + + # pyre-ignore[2]: + def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None: + out = op_call(*args, **kwargs) + dtc = DTensorConverter(mesh, args, kwargs) + for d_args, d_kwargs in dtc: + # pyre can't find assertTrue anymore? 
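+            # Each pass through `dtc` converts the tensor args/kwargs to DTensors under one
+            # sharding combination and re-runs the op; the reassembled distributed result must
+            # match the eager `out` computed above.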
+ self.assertEqual(dtc.successful(), True) + d_out = op_call(*d_args, **d_kwargs) + self.assertEqual(d_out.full_tensor(), out) + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + +TestFunc = Callable[[object], object] + +# wrapper to initialize comms (processgroup) +def with_comms(func: TestFunc) -> TestFunc: + assert func is not None + + @wraps(func) # pyre-ignore[6] + def wrapper( + self, *args: Tuple[object], **kwargs: Dict[str, Any] # type: ignore[misc] + ) -> None: + # if backend not specified, and cuda available, then use nccl, else gloo + if torch.cuda.is_available() and torch.cuda.device_count() >= self.world_size: + self.device_type = "cuda" + else: + self.device_type = "cpu" + + self.init_pg() + func(self, *args, **kwargs) # type: ignore[misc] + self.destroy_pg() + + return wrapper + + +def run_subtests( + cls_inst, + subtest_config: Dict[str, List[Any]], + test_fn: Callable, + *test_args, + **test_kwargs: Any, +): + """ + Runs a test function given by ``test_fn`` as a subtest according to the + configurations specified by ``subtest_config``. This amortizes the + costly setup overhead (including process spawn and initializing the + process group) over the subtests. + + Args: + subtest_config (Dict[str, List[Any]]): A mapping from subtest + keyword argument name to a list of its possible values. + test_fn (Callable): A callable that runs the actual test. + test_args: Positional arguments to pass to ``test_fn``. + test_kwargs: Keyword arguments to pass to ``test_fn``. + """ + # Convert the config mapping to a list to have a fixed order + subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items()) + subtest_config_keys: List[str] = [item[0] for item in subtest_config_items] + subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items] + for values in itertools.product(*subtest_config_values): + # Map keyword to chosen value + subtest_kwargs = dict(zip(subtest_config_keys, values)) + with cls_inst.subTest(**subtest_kwargs): + test_fn(*test_args, **test_kwargs, **subtest_kwargs) + dist.barrier() + + +class DTensorOpTestBase(MultiThreadedTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def device_type(self) -> str: + return DEVICE_TYPE + + def build_device_mesh(self): + return DeviceMesh(self.device_type, list(range(self.world_size))) + + def setUp(self) -> None: + super().setUp() + self._spawn_threads() + + +# This is a class for converting args/kwargs of an op into distributed args/kwargs +class DTensorConverter: + def __init__( + self, + mesh: DeviceMesh, + args: Tuple[object, ...], + kwargs: Dict[str, object], + ) -> None: + self.hit = 0 + self.miss = 0 + self.mesh = mesh + self.args = args + self.kwargs = kwargs + flatten_args, flatten_args_spec = tree_flatten(args) + flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs) + + self.flatten_args: List[object] = flatten_args + self.flatten_args_spec: TreeSpec = flatten_args_spec + self.flatten_kwargs: List[object] = flatten_kwargs + self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec + + choices_for_args = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + self.sharding_combs: Iterator[Sequence[Placement]] = iter( + itertools.product(*choices_for_args) + ) + + def 
successful(self) -> bool: + return self.hit > 0 and self.miss == 0 + + def is_supported_tensor(self, t: torch.Tensor) -> bool: + # TODO: dist tensor need to support quantized and sparse + # tensors, quantized tensor might be relatively easy, but + # sparse tensor have special layouts that we need to possibly + # deal with, until we are clear about them, we don't officially + # support them. + return not any( + [ + t.is_sparse_csr, + t.is_sparse, + t.is_mkldnn, + t.is_quantized, + t.is_nested, + torch._is_functional_tensor(t), + t.is_neg(), + t.is_conj(), + t.device.type in ("lazy", "meta"), + # We need a way to test if a tensor is batched but there + # is no official APi to do it + # torch._C._is_batched(t), + ] + ) + + def gen_sharding_choices_for_arg( + self, arg: torch.Tensor + ) -> Sequence[Placement]: + mesh_size = self.mesh.size() + sharding_choices: List[Placement] = [Replicate()] + # c10d collective does not support bool tensor + # for bool tensor we treat it as replicated + if arg.dtype != torch.bool: + # only generating choices with: replicate, or sharding + # evenly on a dimension that could be sharded + sharding_choices = sharding_choices + [ + Shard(i) + for i, s in enumerate(arg.shape) + if s > 1 and s % mesh_size == 0 + ] + # TODO: add multi mesh choices + # all_choices = itertools.product( + # *(self.mesh.ndim * [sharding_choices]) + # ) + return sharding_choices + + def __iter__(self) -> "DTensorConverter": + return self + + def __next__(self) -> Tuple[Tuple[object, ...], Dict[str, object]]: + try: + next_sharding_choices = next(self.sharding_combs) + idx = 0 + + new_args: List[object] = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + new_args.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_args.append(arg) + + new_kwargs: List[object] = [] + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + new_kwargs.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_kwargs.append(arg) + + return ( + tree_unflatten(new_args, self.flatten_args_spec), + tree_unflatten(new_kwargs, self.flatten_kwargs_spec), + ) + except StopIteration as e: + raise StopIteration from e + + def to_dist_tensor( + self, t: torch.Tensor, mesh: DeviceMesh, placements: List[Placement] + ) -> torch.Tensor: + if type(t) is torch.Tensor or type(t) is nn.Parameter: + if self.is_supported_tensor(t): + self.hit += 1 + if t.ndim == 0: + # scalar tensor by default will be replicated + r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim) + else: + # distribute non-scalar tensors + r = distribute_tensor(t, mesh, placements) + if type(t) is nn.Parameter: + r = nn.Parameter( # type: ignore[assignment] + r, requires_grad=r.requires_grad + ) + return r + else: + self.miss += 1 + return t + elif torch.overrides.is_tensor_like(t): + # Blindly converting tensor subclasses to dist tensor can cause + # unpredictable problems, we explicitly disable this conversion + # for now (i.e. we don't support DTensor holding tensor subclass + # until there's a strong reason later). 
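+            # Tensor-like subclasses are therefore passed through unchanged and recorded as a
+            # miss, which makes `successful()` return False for this call.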
+ self.miss += 1 + return t + else: + raise RuntimeError( + f"Trying to convert to DTensor, but got {type(t)}" + ) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee650f1e63e8f1d45bcab76e6520fcf9a0dad9c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae67cafabcda34f664a32322968b0b938b36ee8a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7facfb5a8c127756b0d348fbb05a13be9f6efb1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py new file mode 100644 index 0000000000000000000000000000000000000000..60857685b88514bbf6a1451f007c294de70f51e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py @@ -0,0 +1,734 @@ +# mypy: ignore-errors + +import enum +from typing import Tuple + +import torch +import torch.distributed.rpc as rpc +import torch.testing._internal.dist_utils as dist_utils +from torch import Tensor, nn +from torch._jit_internal import Future +from torch.distributed.nn import RemoteModule +from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES +from torch.distributed.nn.api.remote_module import _RemoteModule +from torch.testing._internal.common_distributed import skip_if_lt_x_gpu +from torch.testing._internal.common_utils import TemporaryFileName +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +_PARAM_VAL = torch.nn.Parameter(torch.ones(1)) + + +# RPC handler for querying the device on the destination worker. 
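+# (Illustrative note, not from the original file) handlers like this are expected to run on the
+# module's owner worker, e.g. via
+#   rpc.rpc_sync(dst_worker_name, remote_device, args=(remote_module.get_module_rref(),))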
+def remote_device(module_rref): + for param in module_rref.local_value().parameters(): + return param.device + + + # RPC handler for querying __dict__ on the destination worker. + def remote_module_attributes(remote_module): + return remote_module.__dict__ + + + # RPC handler for running forward on the destination worker. + def remote_forward(remote_module, args): + return remote_module.forward(*args) + + # RPC handler for running forward_async on the destination worker. + def remote_forward_async(remote_module, args): + # Since a future cannot be pickled and sent over the RPC layer, + # we have to wait and behave just like the synchronous ``forward``. + return remote_module.forward_async(*args).wait() + + # RPC handler for getting training mode on the destination worker. + def get_remote_training_arg(module_rref): + return module_rref.local_value().training + + class ModuleCreationMode(enum.Enum): + MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface" + MODULE_CTOR = "module_ctor" + + + @torch.jit.interface + class MyModuleInterface: + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + + @torch.jit.interface + class RemoteMyModuleInterface: + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + def forward_async( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Future[Tuple[str, int, Tensor]]: + pass + + + class MyModule(nn.Module): + def __init__(self, first_arg, first_kwarg=-1): + super().__init__() + self.param1 = _PARAM_VAL + + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + return word, number, tensor + + + class BadModule: + def __init__(self, first_arg, first_kwarg=-1): + pass + + + def create_scripted_module(first_arg, first_kwarg=-1): + module = MyModule(first_arg, first_kwarg=first_kwarg) + scripted_module = torch.jit.script(module) + return scripted_module + + + # Common utils for both CPU and CUDA test suites + class CommonRemoteModuleTest(RpcAgentTestFixture): + @property + def world_size(self): # Override setting in RpcAgentTestFixture + return 2 + + @staticmethod + def _create_remote_module_iter(remote_device, modes=None): + if modes is None: + modes = ModuleCreationMode.__members__.values() + + args = (1,) + kwargs = dict(first_kwarg=2) + + if ModuleCreationMode.MODULE_CTOR in modes: + remote_module = RemoteModule(remote_device, MyModule, args, kwargs) + yield remote_module + + if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes: + remote_module = _RemoteModule( + remote_device, + create_scripted_module, + args, + kwargs, + _module_interface_cls=MyModuleInterface, + ) + scripted_remote_module = torch.jit.script(remote_module) + yield scripted_remote_module + + + class RemoteModuleTest(CommonRemoteModuleTest): + @dist_utils.dist_init + def test_bad_module(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + remote_device = f"{dst_worker_name}/cpu" + args = (1,) + kwargs = dict(first_kwarg=2) + + with self.assertRaisesRegex( + ValueError, + r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,", + ): + RemoteModule(remote_device, BadModule, args, kwargs).forward() + + with self.assertRaisesRegex( + ValueError, + r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,", + ): +
RemoteModule(remote_device, BadModule, args, kwargs).forward() + + + @dist_utils.dist_init + def test_forward_async(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2, "3") + for remote_module in self._create_remote_module_iter(dst_worker_name): + ret_fut = remote_module.forward_async(*args) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args))) + + @dist_utils.dist_init + def test_forward_async_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ) + ) + + @torch.jit.script + def run_forward_async(scripted_remote_module: RemoteMyModuleInterface): + ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3") + ret = ret_fut.wait() + return ret + + ret = run_forward_async(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + + @dist_utils.dist_init + def test_forward_sync(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2, "3") + for remote_module in self._create_remote_module_iter(dst_worker_name): + ret = remote_module.forward(*args) + self.assertEqual(ret, tuple(reversed(args))) + + @dist_utils.dist_init + def test_forward_sync_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ) + ) + + @torch.jit.script + def run_forward(scripted_remote_module: MyModuleInterface): + ret = scripted_remote_module.forward(torch.ones(1), 2, "3") + return ret + + ret = run_forward(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + + @dist_utils.dist_init + def test_forward_with_kwargs(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2) + kwargs = dict(word="3") + # Only test Python nn.Module, because script module methods don't support taking kwargs. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + ret_fut = remote_module.forward_async(*args, **kwargs) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args + ("3",)))) + + ret = remote_module.forward(*args, **kwargs) + self.assertEqual(ret, tuple(reversed(args + ("3",)))) + + @dist_utils.dist_init + def test_remote_parameters(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # Only test Python nn.Module, because script module methods don't support ``remote_parameters``. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + param_rrefs = remote_module.remote_parameters() + self.assertEqual(len(param_rrefs), 1) + self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL)) + + @dist_utils.dist_init + def test_get_module_rref(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # Only test Python nn.Module, because script module methods don't support ``get_module_rref``. 
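# For reference, the RemoteModule surface exercised in these tests reduces to a small
# set of calls: construction from a "<worker>/<device>" string, ``forward`` /
# ``forward_async``, ``remote_parameters`` and ``get_module_rref``. A hedged usage
# sketch; the two-worker setup, worker names, and env-based rendezvous are assumptions.
import torch
import torch.distributed.rpc as rpc
from torch.distributed.nn import RemoteModule

if __name__ == "__main__":
    rpc.init_rpc("worker0", rank=0, world_size=2)
    remote_linear = RemoteModule("worker1/cpu", torch.nn.Linear, args=(4, 4))
    out = remote_linear.forward(torch.randn(2, 4))                      # synchronous remote forward
    out_async = remote_linear.forward_async(torch.randn(2, 4)).wait()   # Future-based variant
    param_rrefs = remote_linear.remote_parameters()                     # RRefs to the remote parameters
    module_rref = remote_linear.get_module_rref()                       # RRef to the remote nn.Module
    rpc.shutdown()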
+ for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + rref = remote_module.get_module_rref() + self.assertEqual(rref, remote_module.module_rref) + for param in rref.to_here().parameters(): + self.assertTrue(torch.equal(param, _PARAM_VAL)) + + @dist_utils.dist_init + def test_train_eval(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + remote_module.train() + ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),)) + self.assertEqual(ret1, True) + + remote_module.eval() + ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),)) + self.assertEqual(ret2, False) + + @dist_utils.dist_init + def test_unsupported_methods(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + with self.assertRaisesRegex( + ValueError, r"Method ``register_buffer`` not supported for RemoteModule" + ): + remote_module.register_buffer("buffer", torch.ones(5)) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_parameter`` not supported for RemoteModule", + ): + remote_module.register_parameter( + "param", torch.nn.Parameter(torch.ones(1)) + ) + with self.assertRaisesRegex( + ValueError, r"Method ``add_module`` not supported for RemoteModule" + ): + remote_module.add_module("empty", None) + + with self.assertRaisesRegex( + ValueError, r"Method ``apply`` not supported for RemoteModule" + ): + fn = torch.rand((3, 3), requires_grad=False) + remote_module.apply(fn) + + with self.assertRaisesRegex( + ValueError, r"Method ``cuda`` not supported for RemoteModule" + ): + remote_module.cuda() + with self.assertRaisesRegex( + ValueError, r"Method ``cpu`` not supported for RemoteModule" + ): + remote_module.cpu() + with self.assertRaisesRegex( + ValueError, r"Method ``type`` not supported for RemoteModule" + ): + remote_module.type(torch.FloatTensor) + with self.assertRaisesRegex( + ValueError, r"Method ``float`` not supported for RemoteModule" + ): + remote_module.float() + with self.assertRaisesRegex( + ValueError, r"Method ``double`` not supported for RemoteModule" + ): + remote_module.double() + with self.assertRaisesRegex( + ValueError, r"Method ``bfloat16`` not supported for RemoteModule" + ): + remote_module.bfloat16() + with self.assertRaisesRegex( + ValueError, r"Method ``to`` not supported for RemoteModule" + ): + remote_module.to("cpu", dtype=torch.int32) + + def hook(module, grad_input, grad_output): + pass + + with self.assertRaisesRegex( + ValueError, + r"Method ``register_backward_hook`` not supported for RemoteModule", + ): + remote_module.register_backward_hook(hook) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_forward_pre_hook`` not supported for RemoteModule", + ): + remote_module.register_forward_pre_hook(hook) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_forward_hook`` not supported for RemoteModule", + ): + remote_module.register_forward_hook(hook) + + with self.assertRaisesRegex( + ValueError, r"Method ``state_dict`` not supported for RemoteModule" + ): + remote_module.state_dict() + with self.assertRaisesRegex( + ValueError, 
r"Method ``load_state_dict`` not supported for RemoteModule" + ): + remote_module.load_state_dict({}) + + with self.assertRaisesRegex( + ValueError, + r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.", + ): + remote_module.parameters() + with self.assertRaisesRegex( + ValueError, + r"Method ``named_parameters`` not supported for RemoteModule", + ): + remote_module.named_parameters() + with self.assertRaisesRegex( + ValueError, r"Method ``buffers`` not supported for RemoteModule" + ): + remote_module.buffers() + with self.assertRaisesRegex( + ValueError, r"Method ``named_buffers`` not supported for RemoteModule" + ): + remote_module.named_buffers() + with self.assertRaisesRegex( + ValueError, r"Method ``children`` not supported for RemoteModule" + ): + remote_module.children() + with self.assertRaisesRegex( + ValueError, r"Method ``named_children`` not supported for RemoteModule" + ): + remote_module.named_children() + with self.assertRaisesRegex( + ValueError, r"Method ``modules`` not supported for RemoteModule" + ): + remote_module.modules() + with self.assertRaisesRegex( + ValueError, r"Method ``named_modules`` not supported for RemoteModule" + ): + remote_module.named_modules() + + with self.assertRaisesRegex( + ValueError, r"Method ``requires_grad_`` not supported for RemoteModule" + ): + remote_module.requires_grad_() + with self.assertRaisesRegex( + ValueError, r"Method ``zero_grad`` not supported for RemoteModule" + ): + remote_module.zero_grad() + with self.assertRaisesRegex( + ValueError, r"Method ``share_memory`` not supported for RemoteModule" + ): + remote_module.share_memory() + with self.assertRaisesRegex( + ValueError, r"Method ``extra_repr`` not supported for RemoteModule" + ): + remote_module.extra_repr() + + @dist_utils.dist_init + def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # If a new attribute is added to this RemoteModule after the initialization, + # and it will be sent over the wire by RPC, + # this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES. + # Note that adding a new attribute out of constructor should rarely happen. + # If a new attribute is added to RemoteModule constructor, + # there is a sanity check to enforce developers to add this attribute to either + # _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + new_attr_name = "new_attr" + setattr(remote_module, new_attr_name, 1) + + attrs = rpc.rpc_sync( + dst_worker_name, remote_module_attributes, (remote_module,) + ) + self.assertNotIn(new_attr_name, attrs) + + @dist_utils.dist_init + def test_remote_module_py_pickle_not_supported(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + with TemporaryFileName() as fname: + with self.assertRaisesRegex( + RuntimeError, + "Cannot pickle RemoteModule in python pickler. 
RemoteModule can only be pickled when using RPC", + ): + torch.save(remote_module, fname) + + @dist_utils.dist_init + def test_remote_module_py_pickle_not_supported_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ): + with TemporaryFileName() as fname: + with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"): + torch.save(remote_module, fname) + + +class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest): + @property + def world_size(self): # Override setting in CommonRemoteModuleTest + return 3 + + @dist_utils.dist_init + def test_send_remote_module_over_the_wire(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Unpickled attributes include both the inherent attributes of RemoteModule + # (not inherited from the superclass) and two installed methods. + expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) + expected_unpickled_attrs.append("forward_async") + expected_unpickled_attrs.append("forward") + + # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + # Test querying some simple attributes from worker2. + attrs = rpc.rpc_sync( + dst_worker2_name, remote_module_attributes, (remote_module,) + ) + self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs) + self.assertEqual(attrs["on"], "worker1") + self.assertEqual(attrs["device"], "cpu") + self.assertFalse(attrs["is_device_map_set"]) + self.assertFalse(attrs["is_scriptable"]) + + # Test the installed methods on worker1's can be initiated by worker2 over RPC layer. + # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``forward_async``, + # not have another worker to initiate forward over the RPC layer. + args = (torch.ones(1), 2, "3") + ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args)) + self.assertEqual(ret1, tuple(reversed(args))) + ret2 = rpc.rpc_sync( + dst_worker2_name, remote_forward_async, (remote_module, args) + ) + self.assertEqual(ret2, tuple(reversed(args))) + + @dist_utils.dist_init + def test_send_remote_module_over_the_wire_script_not_supported(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Unpickled attributes include both the inherent attributes of RemoteModule + # (not inherited from the superclass) and two installed methods. + expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) + expected_unpickled_attrs.append("forward_async") + expected_unpickled_attrs.append("forward") + + with self.assertRaisesRegex( + RuntimeError, "Passing a script RemoteModule over RPC is not supported." + ): + # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ): + # Test querying some simple attributes from worker2. 
+ attrs = rpc.rpc_sync( + dst_worker2_name, remote_module_attributes, (remote_module,) + ) + + @dist_utils.dist_init + def test_create_remote_module_from_module_rref(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + remote_module2 = rpc.rpc_sync( + dst_worker2_name, + RemoteModule.init_from_module_rref, + (dst_worker2_name, remote_module.get_module_rref()), + ) + + args = (torch.ones(1), 2, "3") + ret1 = rpc.rpc_sync( + dst_worker1_name, remote_forward, (remote_module, args) + ) + ret2 = rpc.rpc_sync( + dst_worker2_name, remote_forward, (remote_module2, args) + ) + self.assertEqual(ret1, ret2) + + + class CudaRemoteModuleTest(CommonRemoteModuleTest): + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_valid_device(self): + if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = dist_utils.worker_name(dst_rank) + + for remote_module in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + device = rpc.rpc_sync( + dst_worker_name, remote_device, (remote_module.module_rref,) + ) + self.assertEqual(device.type, "cuda") + self.assertEqual(device.index, 0) + + # Test rank works as well. + for remote_module in self._create_remote_module_iter( + f"rank:{dst_rank}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + device = rpc.rpc_sync( + dst_worker_name, remote_device, (remote_module.module_rref,) + ) + self.assertEqual(device.type, "cuda") + self.assertEqual(device.index, 0) + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_invalid_devices(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + with self.assertRaisesRegex( + RuntimeError, + r"Expected one of .+ device type at start of device string", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/foo", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + RuntimeError, r"CUDA error: invalid device ordinal" + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:100", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cpu2", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0/cuda:1", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: /.
The valid format is '<workername>/<device>'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + "/", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + "/cuda:0", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_input_moved_to_cuda_device(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device. + t1 = torch.ones(1) + args = (t1, 2) + t2 = t1 * 2 + kwargs = dict(word=t2) + + # Only test Python nn.Module, because script module methods don't support taking kwargs. + for remote_module in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + ret_fut = remote_module.forward_async(*args, **kwargs) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args + (t2,)))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". + self.assertEqual(ret[0].device.type, "cpu") + self.assertEqual(ret[2].device.type, "cpu") + + ret = remote_module.forward(*args, **kwargs) + self.assertEqual(ret, tuple(reversed(args + (t2,)))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". + self.assertEqual(ret[0].device.type, "cpu") + self.assertEqual(ret[2].device.type, "cpu") + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_input_moved_to_cuda_device_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", + modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE], + ) + ) + + @torch.jit.script + def run_forward(scripted_remote_module: MyModuleInterface): + ret = scripted_remote_module.forward(torch.ones(1), 2, "3") + return ret + + ret = run_forward(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
+ self.assertEqual(ret[2].device.type, "cpu") diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cdc005679f82cdd41e77cb122eae11292105c12 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..018acaf00621abe13a99a4975597c19d5e32355a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6770ea99a6e554d4ff6727acaaf47c6c4075165f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f02db434bc8688653753150da0dc25fd24087b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f62ec5a498c3e676997dc718514ec00e2e161105 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3e8927933374a4612a3536cae71af7ce18608480 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py @@ -0,0 +1,2783 @@ +# mypy: ignore-errors + +import sys +import threading +import time +from enum import Enum +import random 
+import torch +import torch.nn as nn +from datetime import timedelta +import torch.distributed as dist +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +import torch.testing._internal.dist_utils +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.distributed.rpc import RRef +from torch.testing._internal.common_utils import IS_MACOS, skip_but_pass_in_sandcastle_if +from torch.testing._internal.dist_utils import ( + dist_init, + initialize_pg, + wait_until_node_failure, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import skip_if_lt_x_gpu + + +# Right now we test up to 3-layer nested rpc calls. +# rpc_done[1] and ctx_ids[1] represent rpc is done in prev rank, and context id +# sent from prev rank respectively. +# rpc_done[2] and ctx_ids[2] represents for prev of prev rank. +# rpc_done[3] and ctx_ids[3] represents for prev of prev of prev rank. +# rpc_done[0] and ctx_ids[0] represents for current rank, but mostly not used. +rpc_done = [False, False, False, False] +ctx_ids = [-1, -1, -1, -1] + +known_context_ids = set() + +requires_grad_tensor = torch.ones(3, 3, requires_grad=True) + +# Send rpc done info and context_id to +# dst_rank = (self.rank + rank_distance) % self.world_size +# we don't need a lock here since the GIL is held while executing remote +# python UDFs, so access is serialized across several workers. +def _set_rpc_done(ctx_id, rank_distance): + global rpc_done + global ctx_ids + global known_context_ids + rpc_done[rank_distance] = True + ctx_ids[rank_distance] = ctx_id + known_context_ids.add(ctx_id) + + +def _check_rpc_done(rank_distance): + while not rpc_done[rank_distance]: + time.sleep(0.1) + + +def _torch_ones(sizes, requires_grad=False): + return torch.ones(sizes, requires_grad=requires_grad) + +# This method must be called on the rref owner, and verifies that the grad of +# rref tensor equals to the given grad. 
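# The helpers below rely on distributed autograd collecting gradients in a per-context
# map (``dist_autograd.get_gradients``) instead of ``.grad``. A minimal single-process
# sketch of that behaviour; the single-worker setup, env-based rendezvous, and port are
# assumptions, not part of this test suite.
import os
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc

if __name__ == "__main__":
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29500")
    rpc.init_rpc("worker0", rank=0, world_size=1)
    t1 = torch.ones(3, 3, requires_grad=True)
    t2 = torch.zeros(3, 3, requires_grad=True)
    with dist_autograd.context() as context_id:
        loss = torch.add(t1, t2).sum()
        dist_autograd.backward(context_id, [loss])
        grads = dist_autograd.get_gradients(context_id)  # dict keyed by tensor
        assert torch.equal(grads[t1], torch.ones(3, 3))
        assert t1.grad is None  # dist autograd does not populate .grad
    rpc.shutdown()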
+def _compare_owner_value(context_id, rref, grad): + grads = dist_autograd.get_gradients(context_id) + x = grads[rref.local_value()] + if x.is_sparse: + assert grad.is_sparse + x = x.to_dense() + grad = grad.to_dense() + else: + assert not grad.is_sparse + return torch.equal(x, grad) + + +def create_tensor(): + return torch.ones((3, 3), requires_grad=True) + + +def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32): + i = [[0, 1, 1], [2, 0, 2]] + v = [3.2, 4.1, 5.3] + tensor = torch.sparse_coo_tensor( + i, v, (3, 3), requires_grad=requires_grad, dtype=dtype + ) + if coalesce: + tensor = tensor.coalesce() + return tensor + + +@torch.jit.script +def create_torchscript_tensor() -> torch.Tensor: + return torch.ones((3, 3)).requires_grad_() + + +def my_py_add(t1, t2): + return torch.add(t1, t2) + + +def my_scalar_add(a, b): + return a + b + + +def my_rref_add(rref_t1, t2): + ret = torch.add(rref_t1.local_value(), t2) + return ret + + +@torch.jit.script +def my_script_add(t1, t2): + return torch.add(t1, t2) + + +@torch.jit.script +def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor: + t1 = ref_t1.to_here() + return torch.add(t1, t2) + + +def my_nested_rref_add(dst, rref_t1, t2): + return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2)) + + +def ret_requires_grad(): + return requires_grad_tensor + + +def my_py_nested_call(t1, t2, dst, world_size, hops): + next_dst = (dst + 1) % world_size + if hops > 0: + return rpc.rpc_sync( + worker_name(next_dst), + my_py_nested_call, + args=(t1, t2, next_dst, world_size, hops - 1), + ) + else: + return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2)) + + +# after dist autograd context is cleaned up, it should be cleaned up on other +# nodes. This helper allows timeout_seconds for those RPCs to be completed, and +# ensures that all the contexts have been cleaned up in that timeframe.any +def _all_contexts_cleaned_up(timeout_seconds=10): + global known_context_ids + start = time.time() + context_id_to_raised = set() + while ( + time.time() - start < timeout_seconds + and context_id_to_raised != known_context_ids + ): + for context_id in known_context_ids: + try: + dist_autograd._retrieve_context(context_id) + except RuntimeError: + context_id_to_raised.add(context_id) + # all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError. + success = context_id_to_raised == known_context_ids + return success + + +# This function creates a dis autograd context, run rpc_sync on the given ps, +# and then blocks until the ps has verified the grads are correctly accumulated. 
+def _run_trainer(rref_t1, t2, ps, rank_diff, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2)) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + # prevent deleting dist autograd context + rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff)) + rpc.rpc_sync(ps, _check_rpc_done, args=(0,)) + +# This function is the same as _run_trainer, except rpc calls torchscript +# function "my_script_ref_add" instead of python function "my_rref_add" +def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2)) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + # prevent deleting dist autograd context + rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff)) + rpc.rpc_sync(ps, _check_rpc_done, args=(0,)) + + +class SimulateBackwardError(Function): + _simulate_error = True + + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + if SimulateBackwardError._simulate_error: + raise Exception("Simulate error on backward pass") + else: + return input + + +class ExecMode(Enum): + LOCAL = 1 # Run the operation locally. + RPC_SYNC = 2 # Run the operation using rpc_sync + REMOTE = 3 # Run the operation using remote. + RPC_ASYNC = 4 # Run the operation using rpc_async + + +# Common utils for both CPU and CUDA test suites +class CommonDistAutogradTest(RpcAgentTestFixture): + def _exec_func_with_dst(self, dst, exec_mode, method, *args): + if ExecMode.LOCAL == exec_mode: + if len(args) == 1 and isinstance(args[0], list): + return method(*args[0]) + return method(*args) + elif ExecMode.RPC_SYNC == exec_mode: + return rpc.rpc_sync(worker_name(dst), method, args=(args)) + elif ExecMode.REMOTE == exec_mode: + return rpc.remote(worker_name(dst), method, args=(args)).to_here() + elif ExecMode.RPC_ASYNC == exec_mode: + fut = rpc.rpc_async(worker_name(dst), method, args=(args)) + return fut.wait() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + def _exec_func(self, exec_mode, method, *args): + return self._exec_func_with_dst( + self._next_rank(), exec_mode, method, *args + ) + + def _next_rank(self): + if hasattr(self, "dst_rank"): + self.dst_rank = (self.dst_rank + 1) % self.world_size + if self.dst_rank == self.rank: + return self._next_rank() + else: + self.dst_rank = (self.rank + 1) % self.world_size + return self.dst_rank + + def _check_rpc_done(self, rank_distance): + _check_rpc_done(rank_distance) + + def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args): + if exec_mode == ExecMode.LOCAL: + torch.autograd.backward(tensors) + return [arg.grad for arg in args] + else: + self._verify_backwards_remote(tensors, context_id, local_grads, *args) + + def _verify_backwards_remote(self, tensors, context_id, local_grads, *args): + dist_autograd.backward(context_id, tensors) + + # Verify grads were accumulated appropriately. 
+ grads = dist_autograd.get_gradients(context_id) + nargs = len(args) + ngrads = 0 + for i in range(0, nargs): + if local_grads[i] is not None: + self.assertIn(args[i], grads) + self.assertEqual(local_grads[i], grads[args[i]]) + ngrads += 1 + else: + self.assertNotIn(args[i], grads) + + self.assertEqual(ngrads, len(grads)) + + def _test_graph(self, fn, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor() + t2 = build_sparse_tensor() + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2)) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), fn, args=(t1, t2) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + # Verify graph for current context id. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(1, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + next(iter(recv_functions.values())), + t1, + t2, + ret, + ) + + # Wait for the prev rank to be done with rpc. + self._check_rpc_done(1) + # Verify graph for previous context id. + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values()))) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. + dist.barrier() + + # autograd context should be cleaned up by now. + with self.assertRaises(RuntimeError): + ctx = dist_autograd._retrieve_context(context_id) + + # No autograd context available. + with self.assertRaises(RuntimeError): + ctx = dist_autograd._current_context() + + # 3-layer nested calls + def _test_graph_for_py_nested_call(self, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + nest_dst_rank = (dst_rank + 1) % self.world_size + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_py_nested_call, + args=(t1, t2, dst_rank, self.world_size, 1), + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), + my_py_nested_call, + args=(t1, t2, dst_rank, self.world_size, 1), + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + # Barrier to ensure all RPCs are done. + dist.barrier() + + for rd in [1, 2, 3]: + rpc.rpc_sync( + worker_name((self.rank + rd) % self.world_size), + _set_rpc_done, + args=(context_id, rd), + ) + + # Barrier to ensure all set_rpc_done have completed. + dist.barrier() + + # For self.rank, it has 4 graphs to verify + # One is for current context id when this rank send first rpc call. 
+ # Second one is for prev context id when this rank make 1st nested + # call. + # Third one is for prev prev context id when this rank make + # 2nd nested call. + # Last one is for prev prev prev context id when this rank + # execute the torch.add() operator. + + # Verify first graph for current context id. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(1, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + next(iter(recv_functions.values())), + t1, + t2, + ret, + ) + + # Verify second graph for 1st nested call. + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + self._verify_graph_for_nested_rpc_call(ctx) + + # Verify third graph for 2nd nested call. + ctx = dist_autograd._retrieve_context(ctx_ids[2]) + self._verify_graph_for_nested_rpc_call(ctx) + + # verify last graph for rpc call execution. + ctx = dist_autograd._retrieve_context(ctx_ids[3]) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values()))) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. + dist.barrier() + + # Rank0->Rank1->Rank0 + def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_py_nested_call, + args=( + t1, + t2, + (self.rank - 1 + self.world_size) % self.world_size, + self.world_size, + 0, + ), + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), + my_py_nested_call, + args=( + t1, + t2, + (self.rank - 1 + self.world_size) % self.world_size, + self.world_size, + 0, + ), + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name((self.rank + 1) % self.world_size), + _set_rpc_done, + args=(context_id, 1), + ) + + # For self.rank, it has 2 graphs to verify. + # One is for current context id when this rank send first rpc + # call and execute the torch.add() operator. + # Another one is for prev context id when this rank make + # nested call. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(2, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(2, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + list(recv_functions.values())[1], + t1, + t2, + ret, + ) + self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1]) + + # Verify two pairs of send and recv functions for nested + # call + self._check_rpc_done(1) + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + self._verify_graph_for_nested_rpc_call(ctx) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. 
+ dist.barrier() + + def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse): + initialize_pg(self.file_init_method, self.rank, self.world_size) + dst_rank = (self.rank + 1) % self.world_size + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=False) + t2 = build_sparse_tensor(requires_grad=False) + else: + t1 = torch.ones(3, 3, requires_grad=False) + t2 = torch.zeros(3, 3, requires_grad=False) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(t1, t2) + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), torch.add, args=(t1, t2) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + ctx = dist_autograd._current_context() + send_functions = ctx._send_functions() + self.assertEqual(len(send_functions), 0) + recv_functions = ctx._recv_functions() + self.assertEqual(len(recv_functions), 0) + + # Wait for the prev rank to be done with rpc. + self._check_rpc_done(1) + # NB: RRef.to_here() always passes the autograd context to the + # the callee, as the caller does not know whether the return + # value would contain a requires_grad tensor or not. + # + # rpc/remote with udf (_set_rpc_done here) also always passes the + # autograd context to the callee due to the same reason. + self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1])) + dist.barrier() + + def _test_rpc_complex_args(self, exec_mode, sparse): + with dist_autograd.context() as context_id: + num_tensors = 10 + tensors = [] + for i in range(num_tensors): + if sparse: + tensor = build_sparse_tensor(requires_grad=(i % 2 == 0)) + else: + tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0)) + tensors.append(tensor) + dst_rank = self._next_rank() + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.stack, args=(tensors,) + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), torch.stack, args=(tensors,) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + self.assertEqual(torch.stack(tensors), ret) + + # Verify appropriate tensors have been attached the autograd graph. + next_funcs = next(iter(dist_autograd._current_context()._send_functions().values())).next_functions + idx = 0 + for i in range(len(next_funcs)): + self.assertEqual( + "torch::autograd::AccumulateGrad", next_funcs[i][0].name() + ) + self.assertEqual(tensors[i], next_funcs[i][0].variable) + + # Verify that the worker id has been recorded in the context + ctx = dist_autograd._current_context() + worker_ids = ctx._known_worker_ids() + self.assertEqual(len(worker_ids), 1) + self.assertEqual(worker_ids, {dst_rank}) + + def context_cleanup_test_helper(self, rpc_args, func, nested=False): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # test that in dist autograd, in the case that tensors communicated over RPC do + # NOT require grad, we still cleanup the dist autograd contexts created + # on other nodes. This is because the autograd context is still + # communicated over RPC even if tensor arguments do not require grad, as + # it is possible that the response could. 
+ if nested: + dst_rank = (self.rank + 1) % self.world_size + nested_dst_rank = (dst_rank + 1) % self.world_size + dst_ranks = {dst_rank} + else: + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + + with dist_autograd.context() as context_id: + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + if nested: + rpc.rpc_sync( + worker_name(nested_dst_rank), + _set_rpc_done, + args=(context_id, 2), + ) + # the thread's context id should be cleaned up + with self.assertRaises(RuntimeError): + dist_autograd._retrieve_context(context_id) + # Ensure all peers have finished mutating the + # `known_context_ids` set. + dist.barrier() + # check that all contexts have been cleaned up. + success = _all_contexts_cleaned_up() + self.assertTrue(success) + + def _backward_no_grad_on_tensor(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + torch.add, + args=(t1, t2)) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + self.assertIsNone(t1.grad) + self.assertIsNone(t2.grad) + + # Now populate .grad with local autograd engine and + # verify dist autograd doesn't mess with it. + loss_local = torch.add(t1, t2) + if sparse: + loss_local = torch.sparse.sum(loss_local) + else: + loss_local = loss_local.sum() + loss_local.backward() + self.assertIsNotNone(t1.grad) + self.assertIsNotNone(t2.grad) + + t1_grad_before = t1.grad + t2_grad_before = t2.grad + dist_autograd.backward(context_id, [loss]) + self.assertEqual(t1_grad_before, t1.grad) + self.assertEqual(t2_grad_before, t2.grad) + + # The current rank first creates a tensor on the rref_owner, and then passes + # the rref with another tensor to the callee to run either my_rref_add or + # my_nested_rref_add, depending on whether the callee is the rref owner. + # The grad of tensor lives on the current rank, and the grad of the rref + # tensor lives on the rref owner. + def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse): + local_ret = torch.add(t1, t2) + if sparse: + local_ret = torch.sparse.sum(local_ret) + else: + local_ret = local_ret.sum() + local_ret.backward() + with dist_autograd.context() as context_id: + if sparse: + rref_t1 = rpc.remote( + rref_owner, build_sparse_tensor, args=(False, True,) + ) + else: + rref_t1 = rpc.remote( + rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True} + ) + if callee == rref_owner: + rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2)) + else: + rref = rpc.remote( + callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2) + ) + ret = rref.to_here() + if sparse: + ret = torch.sparse.sum(ret) + else: + ret = ret.sum() + dist_autograd.backward(context_id, [ret]) + + # verify grads on caller + grads = dist_autograd.get_gradients(context_id) + self.assertIn(t2, grads) + self.assertEqual(grads[t2], t2.grad) + + # verify grads on rref owner + self.assertTrue( + rpc.rpc_sync( + rref_owner, + _compare_owner_value, + args=(context_id, rref_t1, t1.grad), + ) + ) + + # In this test, every rank will serve as a parameter server (ps) and a + # driver, and then kicks off trainers on the other three ranks. 
So, we have: + # ps = rank0 with trainers = rank1/2/3 + # ps = rank2 with trainers = rank2/3/0 + # ps = rank3 with trainers = rank3/0/1 + # ps = rank4 with trainers = rank0/1/2 + # + # These four test ps-trainer groups run on completely separate autograd + # graphs, but they share the same set of underlying RpcAgents. + def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse): + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones((3, 3), requires_grad=True) + t2 = torch.zeros((3, 3), requires_grad=True) + + local_ret = torch.add(t1, t2) + if sparse: + torch.sparse.sum(local_ret).backward() + else: + local_ret.sum().backward() + + # create rref on self + rref_t1 = rpc.remote( + worker_name(self.rank), + create_ref_fn, + args=()) + + # kick off forward and backward pass on three other workers (trainers) + rank_diffs = [1, 2, 3] + futures = [] + for rank_diff in rank_diffs: + futures.append( + rpc.rpc_async( + worker_name((self.rank + rank_diff) % self.world_size), + trainer_fn, + args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse), + ) + ) + + # check if the trainers have done with their backward pass + for rank_diff in rank_diffs: + self._check_rpc_done(rank_diff) + + # trainers are done and holding the context for verification + accumulate_grad_func = None + for rank_diff in rank_diffs: + # make sure grads are accumulated for the same tensors and values + # are all correct + ctx_id = ctx_ids[rank_diff] + grads = dist_autograd.get_gradients(ctx_id) + local_t1 = rref_t1.to_here() + self.assertIn(local_t1, grads) + self.assertEqual(grads[local_t1], t1.grad) + + # unblock trainers + _set_rpc_done(None, 0) + + # wait until all trainers are done + torch.futures.wait_all(futures) + + def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse): + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + # Multiple RPCs between different nodes. + val = self._exec_func(exec_mode, torch.add, t1, t2) + val = self._exec_func(exec_mode, torch.mul, t3, val) + s1 = self._exec_func(exec_mode, torch.stack, (t4, val)) + s2 = self._exec_func(exec_mode, torch.stack, (t5, val)) + if sparse: + val = self._exec_func(exec_mode, torch.mul, s1, s2) + val = self._exec_func(exec_mode, torch.mul, val, val) + loss = torch.sparse.sum(val) + else: + val = self._exec_func(exec_mode, torch.bmm, s1, s2) + val = self._exec_func(exec_mode, torch.matmul, val, val) + loss = val.sum() + + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5 + ) + local_grads = ret if ret else local_grads + + def _backward_different_dtypes(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + loss = self._exec_func(exec_mode, torch.add, t1, t2) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + # Run the same code locally and with dist autograd and verify gradients + # are same. 
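# The "local" half of that comparison is ordinary autograd. A small standalone sketch of
# the baseline these helpers rebuild before comparing against
# ``dist_autograd.get_gradients`` (the tensor values here are illustrative only).
import torch

def _local_baseline_sketch():
    t1 = torch.ones(3, 3, requires_grad=True)
    t2 = torch.zeros(3, 3, requires_grad=True)
    loss = torch.add(t1, t2).sum()
    torch.autograd.backward([loss])
    # These are the grads that _verify_backwards later expects dist autograd to reproduce.
    return [t1.grad, t2.grad]  # both all-ones for this loss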
+ def _backward_simple_python_udf(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func(exec_mode, my_py_add, t1, t2) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + # Run the same code locally and with dist autograd and verify gradients + # are same. + def _backward_simple_script_call(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ + ExecMode.LOCAL, + ExecMode.RPC_SYNC, + ExecMode.RPC_ASYNC, + ExecMode.REMOTE, + ]: + with dist_autograd.context() as context_id: + forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2) + if sparse: + loss = torch.sparse.sum(forward_ret) + else: + loss = forward_ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + def _nested_backward_accumulate_grads(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._test_nested_backward_accumulate_grads, + args=(t1, t2, self._next_rank()), + ) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + # Run backward twice. + dist_autograd.backward(context_id, [loss], retain_graph=True) + dist_autograd.backward(context_id, [loss]) + + def _backwards_nested_python_udf(self, t1, t2, sparse): + t3 = t1 * t2 + t4 = t1 + t2 + res = t3 + t4 + loss = t1 * t2 * t3 * t4 * res + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + torch.autograd.backward([loss]) + + # Now run distributed autograd. + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._nested_python_udf, + args=(t1, t2, self._next_rank()), + ) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + dist_autograd.backward(context_id, [loss]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + def _mixed_requires_grad(self, t1, t2, sparse): + for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func( + exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2 + ) + self.assertEqual(t1 * t2, ret) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + self.assertTrue(t1.requires_grad) + self.assertFalse(t2.requires_grad) + grads = dist_autograd.get_gradients(context_id) + self.assertIn(t1, grads) + self.assertNotIn(t2, grads) + self.assertEqual(t2, grads[t1]) + + def _multiple_backward(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + torch.add, + args=(t1, t2)) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + # Run backward in a loop multiple times. + for i in range(1000): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + # For current context, this rank sends t1 and t2 tensors to dst_rank, + # then get t3 = torch.add(t1, t2) result tensor. 
+ # For the current context in this rank, it expects graph like this: + # send function: + # rpcSendBackward + # / \ + # t1.AccumulateGrad t2.AccumulateGrad + # + # recv function: + # + # | + # t3.rpcRecvBackward + # + def _verify_graph_for_first_rpc_call( + self, send_function, recv_function, t1, t2, ret + ): + # Retrieve the next functions in the graph. + next_funcs = send_function.next_functions + self.assertEqual(2, len(next_funcs)) + + # We should now hit t1 and t2 in the autograd graph. + self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name()) + self.assertEqual(t1, next_funcs[0][0].variable) + self.assertEqual(0, next_funcs[0][1]) + self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name()) + self.assertEqual(t2, next_funcs[1][0].variable) + self.assertEqual(0, next_funcs[1][1]) + + # Test recv functions. + self.assertEqual(ret.grad_fn, recv_function) + + # Run the same code locally and with dist autograd and verify gradients + # are same. + def _backward_simple(self, dst, t1, t2, local_grads, sparse): + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func_with_dst( + dst, exec_mode, torch.add, t1, t2 + ) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + # For a context passed from previous nested chain calls, this rank + # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends + # result tensor t3 back. + # For this context in this rank, it expects graph like this: + # send and recv functions: + # rpcSendBackward + # | + # t3.AddBackward0 + # / \ + # t1.recvRpcBackward t2.recvRpcBackward + def _verify_graph_for_rpc_call_exec(self, send_function): + # Verify next function is AddBackward0 + next_funcs = send_function.next_functions + self.assertEqual(1, len(next_funcs)) + add_backward_fn = next_funcs[0][0] + self.assertEqual("AddBackward0", add_backward_fn.name()) + + # Verify the next two functions are the same recv backward function. + next_funcs = add_backward_fn.next_functions + self.assertEqual(2, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name() + ) + self.assertEqual(next_funcs[0][0], next_funcs[1][0]) + + # For a context passed from previous nested chain calls, this rank + # receives two tensors t1 and t2, forwards t1 and t2 tensors using + # nested rpc call to next dst. In return route, receive result tensor t3 + # from next dst and forwarding t3 back to previous calls. 
+ # For this context in this rank, it expects graph like this: + # send and recv functions for receiving and forwarding t1 and t2: + # rpcSendBackward + # / \ + # t1.recvRpcBackward t2.recvRpcBackward + # send and recv functions for receiving and forwarding t3: + # rpcSendBackward + # | + # t3.recvRpcBackward + def _verify_graph_for_nested_rpc_call(self, ctx): + send_functions = ctx._send_functions() + self.assertEqual(2, len(send_functions)) + + # For send function when making nest rpc call, + # next functions of the send function are two recv functions + # for received two tensors from previous call + next_funcs = next(iter(send_functions.values())).next_functions + self.assertEqual(2, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name() + ) + self.assertEqual(next_funcs[0][0], next_funcs[1][0]) + + # For send function when returning response to previous call + # next function of the send function is the recv function + # for received tensor result returned from nested call + next_funcs = list(send_functions.values())[1].next_functions + self.assertEqual(1, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + + +class TensorPipeAgentDistAutogradTest(CommonDistAutogradTest): + + # Sparse tests only work with TensorPipeAgent. + @dist_init + def test_graph_for_builtin_call_sparse(self): + self._test_graph(torch.add, ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_python_call_sparse(self): + self._test_graph(my_py_add, ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_builtin_remote_call_sparse(self): + self._test_graph(torch.add, ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_python_remote_call_sparse(self): + self._test_graph(my_py_add, ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_py_nested_call_sparse(self): + self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_py_nested_remote_call_sparse(self): + self._test_graph_for_py_nested_call(ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_py_nested_call_itself_sparse(self): + self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_py_nested_remote_call_itself_sparse(self): + self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_sparse(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_remote_sparse(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True) + + @dist_init + def test_rpc_complex_args_sparse(self): + self._test_rpc_complex_args(ExecMode.RPC_SYNC, True) + + @dist_init + def test_remote_complex_args_sparse(self): + self._test_rpc_complex_args(ExecMode.REMOTE, True) + + @dist_init + def test_context_cleanup_tensor_with_grad_sparse(self): + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_context_cleanup_tensor_no_grad_sparse(self): + t1 = build_sparse_tensor(requires_grad=False) + self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add) + + @dist_init + def test_context_cleanup_nested_rpc_sparse(self): + t1 = 
build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + dst_rank = (self.rank + 1) % self.world_size + args = (t1, t2, dst_rank, self.world_size, 0) + self.context_cleanup_test_helper( + rpc_args=args, func=my_py_nested_call, nested=True + ) + + @dist_init + def test_backward_no_grad_on_tensor_sparse(self): + self._backward_no_grad_on_tensor( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backward_simple_sparse(self): + self._backward_simple( + self._next_rank(), + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_simple_self_sparse(self): + self._backward_simple( + self.rank, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_multi_sparse(self): + if self.rank > 0: + callee = "worker0" + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_sparse(self): + callee = worker_name(self._next_rank()) + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_nested_sparse(self): + callee = worker_name((self.rank + 1) % self.world_size) + rref_owner = worker_name((self.rank + 2) % self.world_size) + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_trainer_ps_sparse(self): + self._test_trainer_ps( + build_sparse_tensor, + _run_trainer, + True + ) + + @dist_init + def test_backward_multiple_round_trips_sparse(self): + self._backward_multiple_round_trips( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_different_dtypes_sparse(self): + self._backward_different_dtypes( + build_sparse_tensor(requires_grad=True, dtype=torch.float32), + build_sparse_tensor(requires_grad=True, dtype=torch.float64), + True + ) + + @dist_init + def test_backward_simple_python_udf_sparse(self): + self._backward_simple_python_udf( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backward_simple_script_call_sparse(self): + self._backward_simple_script_call( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_nested_backward_accumulate_grads_sparse(self): + self._nested_backward_accumulate_grads( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backwards_nested_python_udf_sparse(self): + # Run equivalent of _nested_python_udf locally. 
+ self._backwards_nested_python_udf( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_mixed_requires_grad_sparse(self): + self._mixed_requires_grad( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + True + ) + + @dist_init + def test_multiple_backward_sparse(self): + self._multiple_backward( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_embedding_bag_with_no_grad_tensors(self): + dst = self._next_rank() + remote_embedding = rpc.remote( + worker_name(dst), + torch.nn.EmbeddingBag, + args=(16, 16), + kwargs={"mode": "sum", "sparse": True}, + ) + local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True) + + input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9]) + # requires_grad = True to record send/recv functions + per_sample_weights = torch.rand((8), requires_grad=True) + offsets = torch.LongTensor([0, 4]) + + local_res = local_embedding(input, offsets, per_sample_weights) + + # Run backward twice. + torch.autograd.backward([local_res.sum()], retain_graph=True) + torch.autograd.backward([local_res.sum()]) + local_grad = local_embedding.weight.grad + + with dist_autograd.context() as context_id: + res = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._call_remote_embedding, + args=(remote_embedding, input, offsets, per_sample_weights), + ) + + # Run backward twice to test accumulation of sparse gradients. + dist_autograd.backward(context_id, [res.sum()], retain_graph=True) + dist_autograd.backward(context_id, [res.sum()]) + + remote_grad = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._get_grad, + args=(remote_embedding, context_id), + ) + + self.assertEqual(local_grad, remote_grad) + + +class DistAutogradTest(CommonDistAutogradTest): + @dist_init + def test_autograd_context(self): + # Verify max possible id. + max_auto_increment = 281474976710655 + self.assertEqual( + max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id() + ) + + context_ids = [] + for i in range(200): + with dist_autograd.context() as context_id: + self.assertEqual( + context_id, + dist_autograd._retrieve_context(context_id)._context_id(), + ) + # First 16 bits should be worker_id. + self.assertEqual(self.worker_id, context_id >> 48) + context_ids.append(context_id) + + for context_id in context_ids: + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd._retrieve_context(context_id) + + @dist_init + def test_nested_context(self): + with dist_autograd.context() as context_id: + # Nested contexts not supported. 
+ with self.assertRaisesRegex( + RuntimeError, "Already have an autograd context id for this thread" + ): + with dist_autograd.context() as context_id: + pass + + @dist_init + def test_graph_for_builtin_call(self): + self._test_graph(torch.add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_python_call(self): + self._test_graph(my_py_add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_builtin_remote_call(self): + self._test_graph(torch.add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_python_remote_call(self): + self._test_graph(my_py_add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call(self): + self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call(self): + self._test_graph_for_py_nested_call(ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_remote(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False) + + def _test_grad_only_on_return_value(self, exec_mode): + initialize_pg(self.file_init_method, self.rank, self.world_size) + dst_rank = (self.rank + 1) % self.world_size + with dist_autograd.context() as context_id: + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), ret_requires_grad + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + dist_autograd.backward(context_id, [ret.sum()]) + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + # Wait for the prev rank to be done with rpc. 
+ self._check_rpc_done(1) + grads = dist_autograd.get_gradients(ctx_ids[1]) + self.assertEqual(1, len(grads)) + self.assertIn(requires_grad_tensor, grads) + self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor]) + # due to the above get_gradients call, ensure that dist autograd + # contexts aren't cleaned up until all workers exit context managers + dist.barrier() + + @dist_init + def test_grad_only_on_return_value(self): + self._test_grad_only_on_return_value(ExecMode.RPC_SYNC) + + @dist_init + def test_grad_only_on_return_value_remote(self): + self._test_grad_only_on_return_value(ExecMode.REMOTE) + + @dist_init + def test_rpc_complex_args(self): + self._test_rpc_complex_args(ExecMode.RPC_SYNC, False) + + @dist_init + def test_remote_complex_args(self): + self._test_rpc_complex_args(ExecMode.REMOTE, False) + + @dist_init + def test_context_cleanup_tensor_with_grad(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_context_cleanup_tensor_no_grad(self): + t1 = torch.ones(3, 3, requires_grad=False) + self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add) + + @dist_init + def test_context_cleanup_no_tensors(self): + self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add) + + @dist_init + def test_context_cleanup_nested_rpc(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + dst_rank = (self.rank + 1) % self.world_size + args = (t1, t2, dst_rank, self.world_size, 0) + self.context_cleanup_test_helper( + rpc_args=args, func=my_py_nested_call, nested=True + ) + + @dist_init + def test_worker_ids_recorded(self): + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + with dist_autograd.context() as context_id: + # if no tensors require grad, we should still record worker_ids, as + # the autograd context ID is still passed to other workers. + t1 = torch.ones(3, 3, requires_grad=False) + t2 = torch.zeros(3, 3, requires_grad=False) + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. + ctx = dist_autograd._current_context() + worker_ids = ctx._known_worker_ids() + self.assertEqual(worker_ids, dst_ranks) + + # worker_ids should be recorded when tensors do require grad + t1.requires_grad = True + t2.requires_grad = True + for dst_rank in dst_ranks: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(t1, t2) + ) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. 
+ worker_ids = ctx._known_worker_ids()
+ self.assertEqual(worker_ids, dst_ranks)
+
+ @dist_init
+ def test_dist_autograd_profiling(self):
+ with dist_autograd.context() as context_id:
+ t1 = torch.rand(3, 3, requires_grad=True)
+ t2 = torch.rand(3, 3, requires_grad=True)
+ loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
+ with torch.autograd.profiler.profile() as p:
+ dist_autograd.backward(context_id, [loss])
+
+ function_events = p.function_events
+
+ def get_event(partial_key):
+ return next(event for event in function_events if partial_key in event.name)
+
+ send_event = get_event("SendRpcBackward")
+ recv_event = get_event("RecvRpcBackward")
+ backward_event = get_event("torch::distributed::autograd::backward")
+ # There should be at least 1 send and 1 recv event each, corresponding to the send/recv functions executed.
+ self.assertEqual(send_event.count, 1)
+ self.assertEqual(recv_event.count, 1)
+ # The CPU total for the backward event should be greater than that of send and recv, since
+ # applying those functions in the backward pass is a subset of the entire backward pass.
+ self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
+ self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
+
+ @dist_init
+ def test_error_in_context(self):
+ with dist_autograd.context() as context_id:
+ t1 = torch.rand(3, 3, requires_grad=True)
+ t2 = torch.rand(6, 6, requires_grad=True)
+
+ with self.assertRaises(RuntimeError):
+ # This should throw an error since matrix sizes don't match.
+ rpc.rpc_sync(
+ worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
+ )
+
+ @dist_init
+ def test_backward_no_grad_on_tensor(self):
+ self._backward_no_grad_on_tensor(
+ torch.rand((3, 3), requires_grad=True),
+ torch.rand((3, 3), requires_grad=True),
+ False
+ )
+
+ @dist_init
+ def test_backward_simple(self):
+ self._backward_simple(
+ self._next_rank(),
+ torch.rand((3, 3), requires_grad=True),
+ torch.rand((3, 3), requires_grad=True),
+ None,
+ False
+ )
+
+ @dist_init
+ def test_backward_simple_self(self):
+ self._backward_simple(
+ self.rank,
+ torch.rand((3, 3), requires_grad=True),
+ torch.rand((3, 3), requires_grad=True),
+ None,
+ False
+ )
+
+ @dist_init
+ def test_backward_rref(self):
+ callee = worker_name(self._next_rank())
+ rref_owner = callee
+ self._backward_rref(
+ callee,
+ rref_owner,
+ torch.rand((3, 3), requires_grad=True),
+ torch.rand((3, 3), requires_grad=True),
+ None,
+ False
+ )
+
+ @dist_init
+ def test_backward_rref_multi(self):
+ if self.rank > 0:
+ callee = "worker0"
+ rref_owner = callee
+ self._backward_rref(
+ callee,
+ rref_owner,
+ torch.rand((3, 3), requires_grad=True),
+ torch.rand((3, 3), requires_grad=True),
+ None,
+ False
+ )
+
+ @dist_init
+ def test_backward_rref_nested(self):
+ callee = worker_name((self.rank + 1) % self.world_size)
+ rref_owner = worker_name((self.rank + 2) % self.world_size)
+ self._backward_rref(
+ callee,
+ rref_owner,
+ torch.rand((3, 3), requires_grad=True),
+ torch.rand((3, 3), requires_grad=True),
+ None,
+ False
+ )
+
+ @dist_init
+ def test_trainer_ps(self):
+ self._test_trainer_ps(
+ create_tensor,
+ _run_trainer,
+ False
+ )
+
+ @dist_init
+ def test_trainer_ps_torchscript_functions(self):
+ # TODO: needs more investigation.
+ # There is an RRef leak when shutting down; we suspect it is because the
+ # ref passed as an arg crosses the pybind boundary and is not garbage
+ # collected by Python when calling shutdown().
+ import torch.distributed.rpc.api as api
+ api._ignore_rref_leak = 
True + + self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False) + + @dist_init + def test_backward_multiple_round_trips(self): + self._backward_multiple_round_trips( + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_different_tensor_dims(self): + local_grads = None + t1 = torch.rand((4, 6), requires_grad=True) + t2 = torch.rand((6, 5)) + t3 = torch.rand((5, 7), requires_grad=True) + t4 = torch.rand((7, 9)) + + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + val = self._exec_func(exec_mode, torch.matmul, t1, t2) + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4)) + loss = val.sum() + + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_unused_tensors(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3)) + val = self._exec_func( + exec_mode, + torch.matmul, + torch.narrow(s, 0, 0, 1), + torch.narrow(s, 0, 2, 1), + ) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t3 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_multiple_output_tensors(self): + local_grads = None + t = torch.rand((10, 2), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + tensor_list = self._exec_func(exec_mode, torch.split, t, 2) + t1 = tensor_list[0] + t2 = tensor_list[2] + t3 = tensor_list[4] + + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3)) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t + ) + local_grads = ret if ret else local_grads + + def _run_test_backward_unused_send_function_in_thread(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + # We don't use the result of an RPC function, as a result the + # backward pass would hang in the "FAST" mode. + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + val = torch.mul(t1, t2) + + # Run backward, this would hang forever. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init + def test_backward_unused_send_function(self): + # Run the test in a thread which would never finish. + t = threading.Thread( + target=self._run_test_backward_unused_send_function_in_thread + ) + t.daemon = True + t.start() + t.join(10) # Wait for 10s. + + # Verify thread is still alive (indicating backward hasn't completed yet). + self.assertTrue(t.is_alive()) + + @dist_init + def test_backward_autograd_engine_error(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + # Perform some ops before error simulation. 
+ tmp = (t1 + t2) * (t1 + t2) + t3 = SimulateBackwardError.apply(tmp) + + # Run multiple round trips across different nodes and verify the + # original node receives an error thrown on a node deep in the chain. + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t2, t3) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.mul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.div, args=(val, t2) + ) + + with self.assertRaisesRegex( + RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass" + ): + # Run backwards, and validate we receive an error. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure(self): + rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + # Wait for all RPCs to be done. + dist.barrier() + + # Kill all odd rank nodes. + if self.rank % 2 == 0: + shutdown_error_regex = self.get_shutdown_error_regex() + # Wait for all other nodes to die. + for rank in range(self.world_size): + if rank % 2 != 0: + wait_until_node_failure(rank, shutdown_error_regex) + + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex() + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since all + # other nodes are dead. + dist_autograd.backward(context_id, [res.sum()]) + else: + # Exit all other nodes. 
+ pass + + @dist_init + def test_backward_without_context(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + dist_autograd.backward(context_id, [res.sum()]) + + @dist_init + def test_backward_without_rpc(self): + dst_rank = self.rank + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.add(t1, t2) + + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(torch.ones(3, 3), grads[t1]) + self.assertEqual(torch.ones(3, 3), grads[t2]) + + @dist_init + def test_backward_invalid_args(self): + with dist_autograd.context() as context_id: + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(context_id, None) + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(None, None) + + with self.assertRaisesRegex( + RuntimeError, "No tensors provided for gradient computation" + ): + dist_autograd.backward(context_id, []) + + with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"): + t = torch.rand(3, 3) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "is not a scalar, all roots need to be scalar" + ): + t = torch.rand(3, 3, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "does not have a valid gradient function" + ): + t = torch.rand(1, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + @dist_init + def test_backward_multiple_roots(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum() + r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum() + r3 = self._exec_func(exec_mode, torch.cos, t1).sum() + r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum() + + local_grads = self._verify_backwards( + exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2 + ) + + @dist_init + def test_backward_different_dtypes(self): + self._backward_different_dtypes( + torch.rand((3, 3), requires_grad=True, dtype=torch.float32), + torch.rand((3, 3), requires_grad=True, dtype=torch.float64), + False + ) + + @dist_init + def test_backward_simple_python_udf(self): + self._backward_simple_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_backward_simple_script_call(self): + self._backward_simple_script_call( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @staticmethod + def _complex_python_udf(t1, t2): + t3 = torch.nn.functional.linear(t1, t2) + t4 = torch.nn.functional.linear(t2, t3) + t5 = torch.nn.functional.linear(t3, t4) + return torch.linalg.multi_dot([t1, t2, t3, t4, t5]) + + @dist_init + def test_backward_complex_python_udf(self): + # Run the same code locally and with dist autograd and verify gradients + # are 
same. + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func( + exec_mode, DistAutogradTest._complex_python_udf, t1, t2 + ) + loss = ret.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + @staticmethod + def _python_udf_with_backward_error(t1, t2): + t3 = t1 + t2 + t4 = SimulateBackwardError.apply(t3) + return torch.linalg.multi_dot([t1, t2, t3, t4]) + + @staticmethod + def _nested_rpc_call_backward_error(t1, t2, dst): + t1 = t1 * t2 + t2 = t1 + t2 + res = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2), + ) + return torch.linalg.multi_dot([t1, t2, res]) + + @dist_init + def test_backward_python_udf_error(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._nested_rpc_call_backward_error, + args=(t1, t2, self._next_rank()), + ) + with self.assertRaisesRegex( + RuntimeError, "Simulate error on backward pass" + ): + dist_autograd.backward(context_id, [loss.sum()]) + + _backward_done = False + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure_python_udf(self): + # Set a short timeout to quickly time out failed RPCs. + rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + dst = self._next_rank() + res = rpc.rpc_sync( + worker_name(dst), + my_py_nested_call, + args=(t1, t2, dst, self.world_size, 1), + ) + + dist.barrier() + + # Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error. + if self.rank == 2: + return + + store = dist.distributed_c10d._get_default_store() + if self.rank == 0: + # Wait for rank 2 to die. + shutdown_error_regex = self.get_shutdown_error_regex() + wait_until_node_failure(2, shutdown_error_regex) + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex(). + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since rank 2 is dead. + dist_autograd.backward(context_id, [res.sum()]) + + # Mark rank 0 is done in the store, since the RPC framework on + # some nodes might be broken at this point. + store.set('test_backward_node_failure_python_udf_rank0_done', "True") + else: + # Wait for backward to finish on rank 0. + store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10)) + + @staticmethod + def _nested_python_udf(t1, t2, dst): + t3 = t1 * t2 + t4 = t1 + t2 + res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4)) + return t1 * t2 * t3 * t4 * res + + @dist_init + def test_backwards_nested_python_udf(self): + # Run equivalent of _nested_python_udf locally. 
+ self._backwards_nested_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + _test_clean_context_backward_context_id = None + + class MyBackwardFunc(Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + assert DistAutogradTest._test_clean_context_backward_context_id is not None + + # Release the context to simulate error (use barrier before releasing + # context to ensure all nodes execute the backward function). + dist.barrier() + dist_autograd._release_context( + DistAutogradTest._test_clean_context_backward_context_id + ) + + # Verify all contexts are cleaned up. + assert _all_contexts_cleaned_up() + + return input + + @dist_init + def test_clean_context_during_backward(self): + """ + This test simulates the situation where the 'backward' call might throw + an exception locally which would lead to the autograd context being + cleaned up if we're using the context manager. As a result, the autograd + context might be cleaned up while some threads are still using the + autograd context. + + It is fine for the 'backward' call to throw an exception in this test, + but the process should not crash. + """ + initialize_pg(self.file_init_method, self.rank, self.world_size) + + context = dist_autograd._new_context() + context_id = context._context_id() + DistAutogradTest._test_clean_context_backward_context_id = context_id + + # Send the context id to all nodes. + for i in range(0, self.world_size): + if i != self.rank: + rank_distance = (i - self.rank + self.world_size) % self.world_size + rpc.rpc_sync( + worker_name(i), + _set_rpc_done, + args=(context_id, rank_distance), + ) + + dist.barrier() + + # Verify all context ids have been received. + self.assertEqual(self.world_size - 1, len(known_context_ids)) + + t1 = torch.rand((3, 3), requires_grad=True) + for i in range(0, 100): + dst = self._next_rank() + t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1)) + + # Call MyBackwardFunc as the first op of the backward pass to + # ensure we release the context early in the backward pass. + t1 = DistAutogradTest.MyBackwardFunc.apply(t1) + self.assertEqual(100, len(context._send_functions())) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd.backward(context_id, [t1.sum()]) + + # HACK: Killing workers since otherwise the autograd engine gets stuck on + # other nodes. The proper fix would be addressing: + # https://github.com/pytorch/pytorch/issues/27643, which would inform + # other nodes about the failure. + # The autograd engine gets stuck on other nodes since they're waiting to + # receive gradients from the node that received an error (and as a + # result it didn't execute the rest of the graph). 
+ dist.barrier()
+ rpc.shutdown(graceful=False)
+ sys.exit(0)
+
+ @classmethod
+ def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
+ embedding = embedding_rref.local_value()
+ return embedding(input, offsets, per_sample_weights)
+
+ @classmethod
+ def _get_grad(cls, embedding_rref, context_id):
+ embedding = embedding_rref.local_value()
+ grad_map = dist_autograd.get_gradients(context_id)
+ return grad_map[embedding.weight]
+
+ @classmethod
+ def _mixed_requires_grad_operaton(cls, t1, t2):
+ if t2.requires_grad:
+ return t1 - t2
+ else:
+ return t1 * t2
+
+ @dist_init
+ def test_mixed_requires_grad(self):
+ self._mixed_requires_grad(
+ torch.rand(3, 3, requires_grad=True),
+ torch.rand(3, 3, requires_grad=False),
+ False
+ )
+
+ class TestDebugInfoFunc(Function):
+ @staticmethod
+ def forward(ctx, input):
+ return input
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, input):
+ debug_info = dist_autograd._get_debug_info()
+ assert debug_info is not None
+ backward_passes = int(debug_info["num_current_backward_passes"])
+
+ # Hard to validate exact numbers because of the distributed nature.
+ # We can't use a barrier() here since that would block the single
+ # CPU thread available for autograd and can cause deadlocks.
+ assert backward_passes >= 1 and backward_passes <= 4
+ return input
+
+ @dist_init
+ def test_debug_info(self):
+ initialize_pg(self.file_init_method, self.rank, self.world_size)
+
+ t1 = torch.rand((3, 3), requires_grad=True)
+ t2 = torch.rand((3, 3), requires_grad=True)
+ with dist_autograd.context() as context_id:
+ i = 0
+ res = {}
+ res[i] = t1
+ for rank in range(self.world_size):
+ if rank != self.rank:
+ res[i + 1] = rpc.rpc_sync(
+ worker_name(rank), torch.add, args=(res[i], t2)
+ )
+ i += 1
+
+ # Call custom function in middle of backward pass to ensure all
+ # nodes are still waiting on a backward().
+ res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
+ i += 1
+
+ for rank in range(self.world_size):
+ if rank != self.rank:
+ res[i + 1] = rpc.rpc_sync(
+ worker_name(rank), torch.add, args=(res[i], t2)
+ )
+ i += 1
+
+ dist_autograd.backward(context_id, [res[i].sum()])
+
+ debug_info = dist_autograd._get_debug_info()
+ num_autograd_context = int(debug_info["num_autograd_contexts"])
+ # Need at least one context and not more than 4.
+ self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
+
+ for rd in range(self.world_size - 1):
+ rpc.rpc_sync(
+ worker_name((self.rank + rd + 1) % self.world_size),
+ _set_rpc_done,
+ args=(context_id, rd + 1),
+ )
+
+ dist.barrier()
+
+ # Validate information
+ debug_info = dist_autograd._get_debug_info()
+ assert debug_info is not None
+ self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
+ # should only have `num_current_backward_passes` and `num_autograd_contexts`
+ self.assertTrue(len(debug_info) == 2)
+
+ self.assertTrue(_all_contexts_cleaned_up())
+
+ # All contexts should be cleaned up. 
+ debug_info = dist_autograd._get_debug_info() + self.assertEqual(0, int(debug_info["num_autograd_contexts"])) + + @staticmethod + def _workload_thread(): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2)) + t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3)) + t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4)) + t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5)) + + dist_autograd.backward(context_id, [t6.sum()]) + + @dist_init + def test_async_dist_autograd(self): + """ + This test ensures async processing for distributed autograd works + appropriately. This is achieved by spawning multiple threads and + hammering a single node with a lot of backward() calls. + """ + + initialize_pg(self.file_init_method, self.rank, self.world_size) + if self.rank != 0: + # All other ranks schedule work on rank 0. + threads = [] + for i in range(20): + t = threading.Thread(target=DistAutogradTest._workload_thread) + t.start() + threads.append(t) + + for thread in threads: + thread.join() + + dist.barrier() + + @dist_init + def test_backward_accumulate_grads(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = torch.matmul(t1, t2) + # Run backward twice. + torch.autograd.backward([t3.sum()], retain_graph=True) + torch.autograd.backward([t3.sum()]) + + t3 = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(t1, t2) + ) + # Run backward twice. + dist_autograd.backward(context_id, [t3.sum()], retain_graph=True) + dist_autograd.backward(context_id, [t3.sum()]) + + # Verify the gradients are same for local and remote execution. + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @staticmethod + def _test_nested_backward_accumulate_grads(t1, t2, dst_rank): + return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + + @dist_init + def test_nested_backward_accumulate_grads(self): + self._nested_backward_accumulate_grads( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_multiple_backward(self): + self._multiple_backward( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init(clean_shutdown=False) + def test_multiple_backward_with_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + f'worker{self._next_rank()}', + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2)).sum() + + try: + # Run backward in a loop multiple times. + for i in range(100): + if i < 50: + with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"): + dist_autograd.backward(context_id, [loss], retain_graph=True) + elif i > 50: + # Recovered from error. + dist_autograd.backward(context_id, [loss], retain_graph=True) + else: + dist.barrier() + SimulateBackwardError._simulate_error = False + dist.barrier() + finally: + # Sync before resetting flag. + dist.barrier() + + # Reset the flag. 
+ SimulateBackwardError._simulate_error = True + + @dist_init + def test_backward_verify_hooks(self): + t1 = torch.ones((3, 3), requires_grad=True) + # Double the gradient. + t1.register_hook(lambda grad: grad * 2) + t2 = torch.ones((3, 3), requires_grad=True) + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func(exec_mode, torch.matmul, t1, t2) + loss = ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_no_grad_copy(self): + ''' + Similar to test in test_autograd.py. + ''' + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad.data_ptr() + return grad, grad + + class MyFuncSingleGrad(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFuncSingleGrad.static_grad_ptr = grad.data_ptr() + return grad + + class NonContGradFunc(Function): + @staticmethod + def forward(ctx, inp1): + ctx.size = inp1.size() + return torch.tensor([1.]) + + @staticmethod + def backward(ctx, grad): + return torch.ones(1).expand(ctx.size) + + a = torch.randn(5, 6, requires_grad=True) + b = torch.randn(5, 6, requires_grad=True) + # non-contiguous grad should be copied + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))]) + grads = dist_autograd.get_gradients(context_id) + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + # test case that should trigger no copy for a + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFuncSingleGrad.static_grad_ptr + p_a = grads[a].data_ptr() + # Verify there was no clone. + self.assertTrue(p_a == p_g) + + # Test case that should trigger copy for both of a,b. This is + # different in the distributed autograd case since we hold + # a reference to all grads in a vector until all accumulation is done. + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a].data_ptr() + p_b = grads[b].data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # both should be copied. + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + @dist_init + def test_no_grad_copy_sparse(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + return grad + + class NonContGradFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + # Create a sparse tensor with non-contiguous indices and values + # and return as grad. 
+ v = torch.rand(1, 3) + i = torch.ones(1, 1, dtype=torch.long) + nv = v.expand(8, 3) + ni = i.expand(1, 8) + ngrad = torch.sparse_coo_tensor(ni, nv, (10, 3), dtype=torch.float32) + NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr() + return ngrad, ngrad + + a = torch.randn(10, 3, requires_grad=True) + b = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + # test case that should trigger no copy for a. + with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + # check a uses the same buffer + self.assertTrue(p_a == p_g) + + # Run backwards multiple times. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + # non-contiguous indices and value, we should trigger a copy. + with dist_autograd.context() as context_id: + emb_matrix = NonContGradFunc.apply(a, b) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = NonContGradFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + p_b = grads[b]._values().data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # Verify we cloned both grads. + self.assertFalse(p_a == p_g) + self.assertFalse(p_b == p_g) + + # Run backwards multiple times to verify accumulation. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + @dist_init + def test_grad_copy_sparse_indices_extra_ref(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + static_grad_indices_ref = None + static_grad_values_ref = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + # indices() and values() return views, so holding onto + # references of them would not increment refcount of indices + # and values inside the sparse tensor. + MyFunc.static_grad_indices_ref = grad._indices() + MyFunc.static_grad_values_ref = grad._values() + return grad + + a = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + self.assertIsNotNone(MyFunc.static_grad_indices_ref) + self.assertIsNotNone(MyFunc.static_grad_values_ref) + # grad would be stolen, since static_grad_indices_ref and + # static_grad_values_ref are holding onto views and don't bump the + # refcount. 
+ self.assertTrue(p_g == p_a) + + @dist_init + def test_post_hooks(self): + self.hook_called_times = 0 + + def post_hook_add_one(output_grads, input_grads): + self.hook_called_times += 1 + return output_grads + + def post_hook_add_two(output_grads, input_grads): + self.hook_called_times += 2 + return output_grads + + t = torch.rand(10, 10, requires_grad=True) + a = t + t + + # Register post hooks + accumulate_grad_0 = a.grad_fn.next_functions[0][0] + accumulate_grad_0.register_hook(post_hook_add_one) + accumulate_grad_0.register_hook(post_hook_add_two) + + accumulate_grad_1 = a.grad_fn.next_functions[1][0] + accumulate_grad_1.register_hook(post_hook_add_two) + + with dist_autograd.context() as context_id: + loss = a.sum() + dist_autograd.backward(context_id, [loss]) + self.assertEqual(5, self.hook_called_times) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(1, len(grads)) + self.assertTrue(t in grads) + + @staticmethod + def _slow_add(t1, t2): + time.sleep(1) + t3 = t1 + t2 + t3.requires_grad = True + return t3 + + @dist_init + def test_thread_local_context_id(self): + t1 = torch.rand((3, 3)) + t2 = torch.rand((3, 3)) + + t3 = t1 + t2 + t3.requires_grad = True + t3.sum().backward() + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2)) + + with dist_autograd.context() as context_id: + loss = rref.to_here().sum() + # due to slow add, the continuation of this backward pass will be + # invoked by the previous rpc.remote thread which does not have a + # valid context_id. So, this can test whether we propagate + # thread_local states properly when jumping across threads on the + # server side. + dist_autograd.backward(context_id, [loss]) + self.assertTrue( + rpc.rpc_sync( + dst, + _compare_owner_value, + args=(context_id, rref, t3.grad) + ) + ) + + +class CudaDistAutogradTest(CommonDistAutogradTest): + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_simple(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + (t1 + t2).sum().backward() + with dist_autograd.context() as context_id: + t3 = t1 + t2 + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. + for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5) + # Autograd graph consists of CPU -> GPU -> CPU execution. + ret = self._verify_backwards( + exec_mode, [t7.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation_gpu_root(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. 
+ for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + # Autograd graph consists of CPU -> GPU -> CPU execution. + ret = self._verify_backwards( + exec_mode, [t6.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + +class FaultyAgentDistAutogradTest(RpcAgentTestFixture): + # Reusing a simplified helper function from DistAutogradTest to ensure + # autograd context is successfully cleaned up even when RPCs are failing. + def context_cleanup_test_helper(self, rpc_args, func): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # test that in dist autograd, in the case that tensors communicated over RPC do + # NOT require grad, we still cleanup the dist autograd contexts created + # on other nodes. This is because the autograd context is still + # communicated over RPC even if tensor arguments do not require grad, as + # it is possible that the response could. + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + + with dist_autograd.context() as context_id: + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # the thread's context id should be cleaned up + with self.assertRaises(RuntimeError): + dist_autograd._retrieve_context(context_id) + # Ensure all peers have finished mutating the + # `known_context_ids` set. + dist.barrier() + # check that all contexts have been cleaned up. + success = _all_contexts_cleaned_up() + self.assertTrue(success) + + # no faulty_messages defined so this fails all retryable messages - see + # faulty_rpc_agent_test_fixture.py for the list of retryable messages. + @dist_init + def test_context_cleanup_tensor_with_grad(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_verify_backend_options(self): + self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE) + self.assertEqual(self.rpc_backend_options.num_worker_threads, 8) + self.assertEqual(self.rpc_backend_options.num_fail_sends, 3) + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4) + + +class WrapperModule(nn.Module): + def __init__(self, model, device): + super().__init__() + self.model = model.to(device) + + def forward(self, *args): + return self.model(*args) + + def gradients(self, ctx_id): + grads = dist_autograd.get_gradients(ctx_id) + return [grads[p] for p in self.model.parameters()] + + +class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture): + + @skip_if_lt_x_gpu(4) + def test_device_maps_backward_pass(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + + # The reverse of this device mapping should be used for the backward pass. 
+ options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t1 = torch.rand(10, device=self.rank, requires_grad=True) + t2 = torch.rand(10, device=self.rank, requires_grad=True) + with dist_autograd.context() as context_id: + res = rpc.rpc_sync(dst, torch.add, args=(t1, t2)) + dist_autograd.backward(context_id, [res.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(torch.ones(10), grads[t1]) + self.assertEqual(torch.ones(10), grads[t2]) + self.assertEqual(t1.device, grads[t1].device) + self.assertEqual(t2.device, grads[t2].device) + + rpc.shutdown() + + class MyRemoteCompute(torch.nn.Module): + def forward(self, input): + input = input * 2.0 + return input + + class MyLocalCompute(torch.nn.Module): + def __init__(self, next_stage): + super().__init__() + self.next_stage = next_stage + + def forward(self, input): + return self.next_stage.rpc_sync().forward(input) + + @skip_if_lt_x_gpu(4) + def test_dist_autograd_sync_streams(self): + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + + # The reverse of this device mapping should be used for the backward pass. + options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute) + local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute) + for _ in range(10): + input = torch.rand([1000, 10000], device=self.rank, requires_grad=True) + # Run local autograd + result = input * 2.0 + r = random.random() + loss = result.sum() * r + loss.backward() + + # Run distributed autograd + with dist_autograd.context() as context_id: + result = local_compute(input) + loss = result.sum() * r + dist_autograd.backward(context_id, [loss]) + + # Compare grads. 
+ grads = dist_autograd.get_gradients(context_id) + self.assertEqual(input.grad, grads[input]) + + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_gradients_synchronizations(self): + options = self.rpc_backend_options + for peer_rank in range(self.world_size): + options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # this is master + layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)] + local_layers = [l.to(0) for l in layers] + remote_layers = [] + for rank in range(1, self.world_size): + remote_layers.append(rpc.remote( + worker_name(rank), + WrapperModule, + args=(layers[rank - 1], rank) + )) + + x = torch.randn(5000, 2000).to(0) + # local iteration + local_model = nn.Sequential(*local_layers) + local_model(x).sum().backward() + + # remote iteration + with dist_autograd.context() as context_id: + for remote_layer in remote_layers: + x = remote_layer.rpc_sync().forward(x) + + dist_autograd.backward(context_id, [x.sum()]) + + futs = [] + for remote_layer in remote_layers: + futs.append(remote_layer.rpc_async().gradients(context_id)) + + for i in range(len(futs)): + local_gradients = [p.grad for p in local_layers[i].parameters()] + for g1, g2 in zip(futs[i].wait(), local_gradients): + self.assertEqual(g1, g2) + + rpc.shutdown() diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b506d76746d8e025dd45191e7963144d402b96ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py @@ -0,0 +1,281 @@ +# mypy: ignore-errors + + +import threading + +import torch +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +from torch import optim +from torch.distributed.optim import DistributedOptimizer +from torch.testing._internal.dist_utils import dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +class MyModule: + lock = threading.Lock() + + def __init__(self, requires_grad=True): + # cannot directly use torch.manual_seed(0) as all threads share the same + # default generator. The race from multiple RPC threads could mess up + # the draw order from the default RNG instance, leading to + # non-deterministic behavior. Hence, create a dedicated RNG here. + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu) + + def forward(self, t1): + return torch.mm(self.w, t1) + + def get_w(self): + return self.w + + +class FailingOptimizer(optim.Optimizer): + def __init__(self, params): + super().__init__(params, {}) + + def step(self, closure=None): + raise ValueError("Error running optimizer.") + + +class OptimizerFailingOnConstructor(optim.Optimizer): + def __init__(self, params): + super().__init__(params, {}) + raise ValueError("Error creating optimizer.") + + def step(self, closure=None): + raise NotImplementedError + + +def _call_method(method, obj_rref, *args, **kwargs): + return method(obj_rref.local_value(), *args, **kwargs) + + +def remote_method(method, obj_rref, *args, **kwargs): + """ + Call rpc.remote on a method in a remote object. 
+ + Args: + method: the method (for example, Class.method) + obj_rref (RRef): remote reference to the object + args: positional arguments to pass to the method + kwargs: keyword arguments to pass to the method + + Returns a RRef to the remote method call result. + """ + return rpc.remote( + obj_rref.owner(), + _call_method, + args=[method, obj_rref] + list(args), + kwargs=kwargs, + ) + + +def rpc_async_method(method, obj_rref, *args, **kwargs): + """ + Call rpc.rpc_async on a method in a remote object. + + Args: + method: the method (for example, Class.method) + obj_rref (RRef): remote reference to the object + args: positional arguments to pass to the method + kwargs: keyword arguments to pass to the method + + Returns a Future to the method call result. + """ + return rpc.rpc_async( + obj_rref.owner(), + _call_method, + args=[method, obj_rref] + list(args), + kwargs=kwargs, + ) + + +class DistOptimizerTest(RpcAgentTestFixture): + @dist_init() + def test_dist_optim_exception(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + dist_optim = DistributedOptimizer( + FailingOptimizer, [remote_param1, remote_param2] + ) + + with dist_autograd.context() as context_id: + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1).sum() + + dist_autograd.backward(context_id, [loss]) + with self.assertRaisesRegex(Exception, "Error running optimizer"): + dist_optim.step(context_id) + + @dist_init() + def test_dist_optim_exception_on_constructor(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + with self.assertRaisesRegex(Exception, "Error creating optimizer."): + dist_optim = DistributedOptimizer( + OptimizerFailingOnConstructor, [remote_param1, remote_param2] + ) + + def _test_dist_optim_base(self, optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = MyModule() + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = 
remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + old_w1_remote = remote_param1.to_here() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + dist_optim.step(context_id) + + new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait() + new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait() + + # ensure optimizer changed weights + self.assertNotEqual(old_w1, new_w1) + self.assertNotEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim(self): + self._test_dist_optim_base(optim.Adagrad, lr=0.05) + self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True) + self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True) + self._test_dist_optim_base(optim.SGD, lr=0.05) + self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True) + self._test_dist_optim_base(optim.Adadelta, rho=0.95) + self._test_dist_optim_base(optim.RMSprop, lr=0.05) + self._test_dist_optim_base(optim.Adamax, lr=0.05) + self._test_dist_optim_base(optim.Rprop, lr=0.05) + + def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = MyModule(requires_grad=False) + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule, args=(False,)) + remote_param1 = remote_module1.remote().get_w() + remote_param2 = remote_module2.remote().get_w() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = remote_module1.rpc_async().forward(t2) + output2 = remote_module2.rpc_async().forward(output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + 
dist_optim.step(context_id) + + new_w1 = remote_module1.rpc_async().get_w().wait() + new_w2 = remote_module2.rpc_async().get_w().wait() + + # ensure optimizer changed weights for w1 + self.assertNotEqual(old_w1, new_w1) + + # ensure optimizer not changed weights for w2 + self.assertEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim_none_grads(self): + self._test_dist_optim_none_grads(optim.SGD, lr=0.05) + self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Rprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6232b5cb51d21c65280f3faf59a49a6492c655a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py @@ -0,0 +1,326 @@ +# mypy: ignore-errors + +import torch +import time +import torch.distributed.rpc as rpc +from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs +from torch.testing._internal.dist_utils import ( + dist_init, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return torch.mul(torch.tensor(1), torch.tensor(1)) + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + +def add_rref_to_value(rref, value): + return rref.to_here() + value + +class FaultyAgentRpcTest(RpcAgentTestFixture): + + # no faulty_messages defined so this fails all retryable messages - see + # faulty_rpc_agent_test_fixture.py for the list of retryable messages. 
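+ # A minimal, hypothetical sketch (not one of the tests in this file) of how
+ # the decorator kwargs used below reach setup_fault_injection() on the fixture:
+ #
+ #     @dist_init(faulty_messages=["RREF_FORK_REQUEST"],
+ #                messages_to_delay={"PYTHON_CALL": 0.5})
+ #     def test_sketch(self):
+ #         # only RREF_FORK_REQUEST sends are dropped; PYTHON_CALL messages are
+ #         # delayed by 0.5s in the faulty agent before being processed
+ #         ...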
+ @dist_init(messages_to_delay={}) + def test_check_failed_messages(self): + if self.rank == 0: + dst_worker_b = worker_name((self.rank + 1) % self.world_size) + dst_worker_c = worker_name((self.rank + 2) % self.world_size) + + # Worker0 sends RPC to Worker1 and creates an RRef there + rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2))) + # Worker0 sends an RPC to Worker2 with the RRef as an arg + rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2))) + # check if the output is as expected + self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2))) + # explicitly delete all User RRefs + _delete_all_user_and_unforked_owner_rrefs() + + @dist_init + def test_verify_backend_options(self): + self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE) + self.assertEqual(self.rpc_backend_options.num_worker_threads, 8) + self.assertEqual(self.rpc_backend_options.num_fail_sends, 3) + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4) + self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2) + self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]) + def test_custom_faulty_messages(self): + self.assertEqual( + {"RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"}, + set(self.rpc_backend_options.messages_to_fail), + ) + + @dist_init(faulty_messages=[]) + def test_no_faulty_messages(self): + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0) + + @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_custom_messages_to_delay(self): + self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5}) + + def _test_remote_message_dropped_pickle(self, dst=None): + if self.rank != 0: + return + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. + rref = rpc.remote(dst_worker, my_sleep_func, args=(1,)) + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Attempt to fork the RRef should raise an error indicating the rpc.remote timeout. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref._serialize() + # Test that using RRef as arg over RPC (which forks) results in the same + # error + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1)) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle(self): + self._test_remote_message_dropped_pickle() + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle_to_self(self): + self._test_remote_message_dropped_pickle(self.rank) + + + def _test_remote_message_dropped_timeout(self, func, args, dst=None): + if self.rank != 0: + return + + # test the case where rpc.remote() message creation is completely dropped. + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. 
+ rref = rpc.remote(dst_worker, func, args=args) + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + # Note: during shutdown, logs will indicate "Could not find OwnerRRef..." + # on the owning nodes, this is expected because the OwnerRRef was never + # successfully created. Therefore, delAllUsers will work as expected. + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout_to_self(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + def _test_remote_message_delay_timeout(self, func, args, dst=None): + if self.rank != 0: + return + # Test the case where remote message is eventually processed on the owner, + # but the future on the creator times out before the response comes back. + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, func, args=args, timeout=0.001) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + # to_here() should now pick up that rpc.remote() creation has failed. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + # Test the case where rpc.remote() times out, but to_here() has already + # started blocking before. + # NOTE: we only test this when not sending to self, as to_here() calls + # calls localValue(), which does not send an RPC and thus does not have + # a timeout. This can be supported by allowing future.wait() to + # take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280) + if dst_rank != self.rank: + slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2) + + with self.assertRaisesRegex(RuntimeError, expected_error): + # to_here() should raise timeout error, since it does not know about the + # status of rpc.remote(). + slow_rref.to_here(0.001) + # Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete + # but this can be a noop since it may not exist on the owner yet. Later, + # the owner can process the RRef creation and wait for the delete message, + # thus leading to a timeout. + # Therefore, we wait until we get notification that pending owners have + # been confirmed before sending out RRefUserDeletes. 
+ if dst_rank != self.rank: + wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_delay_timeout(func, args) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout_to_self(self): + func = my_sleep_func + args = (1,) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout_to_self(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1}) + def test_rref_to_here_timeout(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref.to_here(0.01) + + rref.to_here() + + @dist_init(faulty_messages=[]) + def test_rpc_builtin_timeout(self): + next_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + # PYTHON_CALL message types which correspond to Python UDF over RPC + # by default get a delay (see faulty_rpc_agent_test_fixture) + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync( + dst_worker, + torch.add, + args=(torch.tensor(1), torch.tensor(1)), + timeout=1, + ) + + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1 + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. 
+ fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_rpc_script_timeout(self): + next_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1) + + fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..fa7287a0c984f007ed757961df7473c3f27940dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py @@ -0,0 +1,62 @@ +# mypy: ignore-errors + +import torch.distributed.rpc as rpc +import torch.distributed.rpc._testing # noqa: F401 +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +# The following message types are currently retried in the RREF protocol and +# distributed autograd. Thus only these messages should be tested with the +# Faulty RPC Agent. +retryable_message_types = ["RREF_FORK_REQUEST", + "RREF_CHILD_ACCEPT", + "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ"] + +# The following messages incur the corresponding delay in seconds while being +# processed in FaultyTensorPipeAgent's enqueueSend() function. 
+default_messages_to_delay = { + "PYTHON_CALL": 1.5, # Python UDF + "SCRIPT_CALL": 1.5, # Script/Builtin +} + +class FaultyRpcAgentTestFixture(RpcAgentTestFixture): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.messages_to_fail = retryable_message_types + self.messages_to_delay = default_messages_to_delay + + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "FAULTY_TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + num_worker_threads=8, + num_fail_sends=3, + messages_to_fail=self.messages_to_fail, + messages_to_delay=self.messages_to_delay, + ) + + def setup_fault_injection(self, faulty_messages, messages_to_delay): + if faulty_messages is not None: + self.messages_to_fail = faulty_messages + if messages_to_delay is not None: + self.messages_to_delay = messages_to_delay + + def get_shutdown_error_regex(self): + error_regexes = [ + "Exception in thread pool task", + "Connection reset by peer", + "Connection closed by peer" + ] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than" diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..eb439258f168be54bff5221f873ace50ea5ea753 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py @@ -0,0 +1,66 @@ +# mypy: ignore-errors + +import os +from abc import ABC, abstractmethod + +import torch.testing._internal.dist_utils + + +class RpcAgentTestFixture(ABC): + @property + def world_size(self) -> int: + return 4 + + @property + def init_method(self): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + master_addr = os.environ["MASTER_ADDR"] + master_port = os.environ["MASTER_PORT"] + return f"tcp://{master_addr}:{master_port}" + else: + return self.file_init_method + + @property + def file_init_method(self): + return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format( + file_name=self.file_name + ) + + @property + @abstractmethod + def rpc_backend(self): + pass + + @property + @abstractmethod + def rpc_backend_options(self): + pass + + def setup_fault_injection(self, faulty_messages, messages_to_delay): # noqa: B027 + """Method used by dist_init to prepare the faulty agent. + + Does nothing for other agents. + """ + pass + + # Shutdown sequence is not well defined, so we may see any of the following + # errors when running tests that simulate errors via a shutdown on the + # remote end. + @abstractmethod + def get_shutdown_error_regex(self): + """ + Return various error message we may see from RPC agents while running + tests that check for failures. This function is used to match against + possible errors to ensure failures were raised properly. + """ + pass + + @abstractmethod + def get_timeout_error_regex(self): + """ + Returns a partial string indicating the error we should receive when an + RPC has timed out. Useful for use with assertRaisesRegex() to ensure we + have the right errors during timeout. 
+ """ + pass diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..25495f0bf88804f2b6f659abee9cd4339248551c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -0,0 +1,6495 @@ +# mypy: ignore-errors + +import concurrent.futures +import contextlib +import json +import os +import sys +import threading +import time + +from collections import namedtuple +from functools import partial +from threading import Event +from threading import Lock +from unittest import mock + +import torch +import torch.nn as nn +import torch.distributed as dist +import torch.distributed.rpc as rpc +import torch.distributed.autograd as dist_autograd +from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo +from torch.distributed.rpc.api import _use_rpc_pickler, _thread_local_var, _wait_all +from torch.distributed.rpc.internal import ( + PythonUDF, + RPCExecMode, + _internal_rpc_pickler, + _build_rpc_profiling_key, +) +from torch.futures import Future +from torch.testing._internal.common_distributed import ( + skip_if_lt_x_gpu, + captured_output, + tp_transports, +) +from torch.testing._internal.common_utils import ( + IS_MACOS, + load_tests, + skip_but_pass_in_sandcastle_if, + get_cycles_per_ms, +) + +from torch.testing._internal.dist_utils import ( + dist_init, + get_function_event, + initialize_pg, + wait_until_node_failure, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_utils import TemporaryFileName + +from torch.autograd.profiler_legacy import profile as _profile + + +def foo_add(): + return torch.add(torch.ones(1), torch.ones(1)) + +def udf_with_torch_ops(device=-1, use_record_function=False): + device_ctx = contextlib.nullcontext() if device == -1 else torch.cuda.device(device) + record_function_ctx = ( + torch.autograd.profiler.record_function("##forward##") + if use_record_function + else contextlib.nullcontext() + ) + with device_ctx, record_function_ctx: + t1, t2 = torch.ones(1), torch.ones(1) + t = torch.add(t1, t2) + t = torch.mul(t, t) + t = t.relu() + t = t.sigmoid() + +# Events (operator invocations) that are expected to be ran as part of the above +# function. +EXPECTED_REMOTE_EVENTS = [ + "aten::ones", + "aten::ones", + "aten::add", + "aten::mul", + "aten::relu", + "aten::clamp_min", + "aten::sigmoid", +] + +# Remote operations are prefixed with the following string for RPC profiling. +REMOTE_OP_STR = "#remote_op: " + + +VALUE_FUTURE = concurrent.futures.Future() +DONE_FUTURE = concurrent.futures.Future() + +FIFTY_MIL_CYCLES = 50000000 + +_rpc_barrier_count = 0 + +def _increment_count(): + global _rpc_barrier_count + _rpc_barrier_count += 1 + +def _reset_count(): + global _rpc_barrier_count + _rpc_barrier_count = 0 + +class StubRpcAgent: + def __init__(self, world_size): + self.world_size = world_size + + def get_worker_infos(self): + return { + WorkerInfo(name=worker_name(rank), id=rank) + for rank in range(self.world_size) + } + + +def _stub_construct_rpc_backend_options_handler(**kwargs): + return mock.Mock() # RpcBackendOptions. 
+ + +def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options): + return StubRpcAgent(world_size=world_size) + + +def set_value(value): + VALUE_FUTURE.set_result(value) + + +def wait_for_value_future(): + return VALUE_FUTURE.result() + + +def set_and_check_done(value): + VALUE_FUTURE.set_result(value) + return DONE_FUTURE.result() + + +# it is used to test python user defined function over rpc +# classes and functions are used to test python user defined class and +# methods over rpc +TensorClass = namedtuple("TensorClass", ["tensors"]) + +class MyPickleClass: + def __init__(self): + self.t = None + + def __getstate__(self): + (pickled_python_udf, tensors) = _internal_rpc_pickler.serialize( + PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None) + ) + return (pickled_python_udf, tensors) + + def __setstate__(self, obj): + python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1]) + result = python_udf.func(python_udf.args[0], python_udf.args[1]) + self.t = result + + def set(self, val): + self.t = val + + +class SlowPickleClass: + def __init__(self, t): + self.t = t + + def __getstate__(self): + time.sleep(self.t) + return (self.t, ) + + def __setstate__(self, obj): + self.t = obj[0] + time.sleep(self.t) + + +class MyClass: + def __init__(self, a, delay=False): + self.a = a + # delay initialization to simulate errors if specified + if delay: + time.sleep(2) + + def my_instance_method(self, b): + return self.a + b + + @classmethod + def my_class_method(cls, d, e): + return d + e + + @staticmethod + def my_static_method(f): + return f > 10 + + def increment_value(self, increment): + self.a += increment + + def get_value(self): + return self.a + + def my_slow_method(self, my_tensor_arg): + time.sleep(5) + return torch.add(self.a, my_tensor_arg) + + +def _call_method_on_rref(method, rref, *args, **kwargs): + return method(rref.local_value(), *args, **kwargs) + + +def get_rref_list(values): + return [RRef(MyClass(a)) for a in values] + + +def add_rref_to_value(rref, value): + return rref.to_here() + value + + +def run_nested_pickle(pickle_cls_instance, tensor): + return pickle_cls_instance.t + tensor + +def build_sparse_tensor(coalesce=False): + i = [[0, 1, 1], [2, 0, 2]] + v = [3, 4, 5] + tensor = torch.sparse_coo_tensor(i, v, (2, 3)) + if coalesce: + tensor = tensor.coalesce() + return tensor + +def build_complex_tensors(): + a = torch.ones(3, 3) + b = [a, a] + c = [b, b] + d = [a, b] + e = {a: d} + return [a, b, c, d, e] + +def non_cont_test(t_view, t_cont): + if t_view.is_contiguous(): + raise Exception('t_view is contiguous!') + if not t_cont.is_contiguous(): + raise Exception('t_cont is not contiguous!') + if not torch.equal(t_view, t_cont): + raise Exception('t_view is not equal to t_cont!') + return t_view + +def my_function(a, b, c): + return a + b + c + + +def my_tensor_function(a, b): + return a + b + +def my_container_sum(a): + result = a[0] + for tensor in a[1:]: + result += tensor + return result + + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return torch.mul(torch.tensor(1), torch.tensor(1)) + + +def my_complex_tensor_function(list_input, tensor_class_input, dict_input): + res = list_input[0] + for t in list_input: + res += t + for v in dict_input.values(): + res += v + complex_tensors = tensor_class_input.tensors + return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2]) + + +def my_rref_function(rref_a, rref_b): + return rref_a.to_here() + rref_b.to_here() + + +def delayed_add(a, b, seconds=0.05): 
+ time.sleep(seconds) + return a + b + + +def identity(a): + return a + +def no_result(): + print("do nothing") + +def raise_or_inc(value): + if value.numel() == 2: + raise ValueError("Expected error") + return value + 1 + +def nested_rpc(dst): + return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def nested_rpc_sparse(dst): + return rpc.rpc_sync( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ) + + +def multi_layer_nested_async_rpc(dst, world_size, ttl): + # this method returns immediately without blocking the callee, but will + # generate additional requests. + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + rpc.rpc_async( + current_dst, + multi_layer_nested_async_rpc, + args=(next_dst, world_size, ttl - 1), + ) + return 0 + + +def nested_rref(dst): + return ( + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)), + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)), + ) + + +def nested_rref_sparse(dst): + return ( + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + ) + + +def nested_remote(dst): + rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3)) + return rref.to_here() + +def nested_remote_sparse(dst): + rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())) + return rref.to_here() + + +def rref_forward_chain(dst, world_size, rref, ttl): + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + ret_rref = rpc.remote( + current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1) + ) + return [ret_rref] + else: + return rref.to_here() + + +def rpc_return_rref(dst): + return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def light_rpc(): + return 0 + + +def heavy_rpc(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +def heavy_rpc_sparse(tensor): + for i in range(1, 100): + tensor *= i + tensor = tensor / (i + 1) + return 0 + +@torch.jit.script +def heavy_rpc_torchscript(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + + +expected_err = "Expected error" + +# Note that it needs to inherit from Exception, not BaseException. 
See comment +# in rpc/internal.py +class CustomException(Exception): + def __init__(self, bool, msg): + self.bool = bool + super().__init__(msg) + +def raise_func(): + raise ValueError(expected_err) + +def custom_raise_func(): + raise CustomException(True, "foo") + +@torch.jit.script +def raise_func_script(expected_err: str) -> torch.Tensor: + raise ValueError(expected_err) + +expected_err_escape = "\nFirst line of error \n next line of error \n last line of error" +def raise_func_escape(): + raise ValueError(expected_err_escape) + + +global_rref = None + + +def set_global_rref(rref): + global global_rref + global_rref = rref + + +def clear_global_rref(): + global global_rref + global_rref = None + + +def check_rref_confirmed(rref): + return rref.confirmed_by_owner() + + +def get_rref_debug_info(): + return _rref_context_get_debug_info() + + +def add_use_future_cb(to, x, y, z): + out = concurrent.futures.Future() + + def callback(fut): + out.set_result(fut.wait() + z) + + fut = rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(callback) + return out.result() + + +def get_events_from_profile(profile_rref): + return profile_rref.local_value().process_global_function_events + + +def add_use_future_set_result(to, x, y, z): + out = torch.futures.Future() + fut = rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(lambda fut : out.set_result(fut.wait() + z)) + return out.wait() + + +def add_use_future_nested_cb(to, x, y, z): + out = torch.futures.Future() + + def callback(fut1): + fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z)) + fut2.then(lambda fut2 : out.set_result(fut2.wait())) + + fut1 = rpc.rpc_async(to, torch.add, args=(x, y)) + fut1.then(callback) + return out.wait() + + +def fail_on_fut(fut): + pass + + +@rpc.functions.async_execution +def async_raise_func(): + raise RuntimeError("Expected error") + + +@rpc.functions.async_execution +def async_wrong_type(): + return torch.zeros(2, 2) + + +@rpc.functions.async_execution +def async_add(to, x, y): + return rpc.rpc_async(to, torch.add, args=(x, y)) + + +def slow_add(x, y, device="cpu"): + time.sleep(1) + x = x.to(device) + y = y.to(device) + return torch.add(x, y).cpu() + + +@rpc.functions.async_execution +def slow_async_add(to, x, y, device="cpu"): + return rpc.rpc_async(to, slow_add, args=(x, y, device)) + + +@rpc.functions.async_execution +def async_add_with_future_ctor(to, x, y, z): + fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut1: fut.set_result(fut1.wait() + z) + ) + return fut + + +@rpc.functions.async_execution +def async_add_chained(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_chained_multi(to, x, num, step): + fut = rpc.rpc_async(to, torch.add, args=(x, 0)) + for _ in range(num): + fut = fut.then(lambda fut: fut.wait() + step) + return fut + + +@rpc.functions.async_execution +def async_add_nested(to, x, y, z): + return rpc.rpc_async(to, async_add, args=(to, x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_multi_fanout(to, x, num, step): + futs = [] + for i in range(num): + if i == 0: + futs.append(rpc.rpc_async(to, torch.add, args=(x, step))) + else: + futs.append(rpc.rpc_async(to, torch.add, args=(0, step))) + + # TODO: use torch.futures.collect_all + lock = Lock() + state = {"cnt": 0, "ret": torch.zeros_like(x)} + ret_future = torch.futures.Future() + + def inc_and_set(fut): + with lock: + state["cnt"] += 1 + 
state["ret"] += fut.wait() + if state["cnt"] >= len(futs): + ret_future.set_result(state["ret"]) + + for fut in futs: + fut.then(inc_and_set) + + return ret_future + + +@rpc.functions.async_execution +def async_cuda_sleep_and_set_to_one(t): + device = t.device + original_stream = torch.cuda.current_stream(device) + new_stream = torch.cuda.Stream(device) + new_stream.wait_stream(original_stream) + with torch.cuda.stream(new_stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + t.fill_(1) + fut = Future(devices=[device]) + fut.set_result(t) + return fut + + +@rpc.functions.async_execution +def async_cuda_nested_add(to, x, y, z): + def cb(fut): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + return fut.value() + z + + return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb) + + +# A custom Python class that contains a tensor, needed to see if we correctly +# use the Python pickler to extract tensors from non-IValue-convertible types. +class TensorWrapper: + __slots__ = ("tensor", "lock", "event", "thread") + + def __init__(self, t): + self.tensor = t + # Add one non-picklable field, to ensure it's ignored/skipped. + self.lock = Lock() + self.event = torch.cuda.Event(enable_timing=True) + self.thread = threading.Thread() + self.thread.start() + + def increase(self, v): + with self.lock: + self.tensor += v + + def sum(self): + with self.lock: + self.event.record() + return self.tensor.sum() + + +class AsyncExecutionClass: + + @staticmethod + @rpc.functions.async_execution + def static_async_add(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + @classmethod + @rpc.functions.async_execution + def class_async_add(cls, to, x, y, z): + ret_fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: ret_fut.set_result(fut.wait() + z) + ) + return ret_fut + + @rpc.functions.async_execution + def bound_async_add(self, to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +def return_future(): + return torch.futures.Future() + + +class FooBackendOptions(rpc.RpcBackendOptions): + def __init__(self, init_method): + # Must call the __init__ of the superclass (and do so directly, + # without using super()) because... pybind. + rpc.RpcBackendOptions.__init__(self) + self.init_method = init_method + + +# load_tests from common_utils is used to automatically filter tests for +# sharding on sandcastle. 
This line silences flake warnings +load_tests = load_tests + + +class MyEmbeddingBagModel(torch.nn.Module): + def __init__(self, sparse): + super().__init__() + self.eb = torch.nn.EmbeddingBag( + 10, + 10, + sparse=sparse + ) + + def forward(self, x): + return self.eb(x) + + +class MyParameterServer: + def __init__(self, trainers): + self.lock = Lock() + self.trainers = trainers + self.iteration = 0 + self.updates = 0 + self.futures = [] + self.total = None + self.gradient = None + + @staticmethod + def get_gradient(rref): + return rref.local_value().gradient + + @staticmethod + @rpc.functions.async_execution + def average(rref, riteration, tensor): + self = rref.local_value() + fut = torch.futures.Future() + with self.lock: + if riteration > self.iteration: + self.iteration = riteration + self.updates = 0 + self.futures.clear() + self.futures.append(fut) + if self.total is None: + self.total = tensor + else: + self.total += tensor + self.updates += 1 + if self.trainers == self.updates: + self.gradient = self.total / float(self.trainers) + for fut in self.futures: + result = self.total / float(self.trainers) + fut.set_result(result) + return fut + + +class MyConvNetForMNIST(nn.Module): + def __init__(self, device): + super().__init__() + self.net = nn.Sequential( + nn.Conv2d(1, 16, 3, 1), + nn.ReLU(), + nn.Conv2d(16, 32, 3, 1), + nn.ReLU(), + nn.MaxPool2d(2), + nn.Flatten(1), + nn.Linear(4608, 128), + nn.ReLU(), + nn.Linear(128, 10), + ).to(device) + self.device = device + + def forward(self, x, is_rref=False): + x = x.to_here() if is_rref else x + with torch.cuda.stream(torch.cuda.current_stream(self.device)): + # intentionally adding delay to current CUDA stream + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + return self.net(x) + + def __getstate__(self): + # return an empty dict to avoid inspecting the model contents on the + # owner + return {} + + +class RpcTestCommon: + def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None): + if mode == RPCExecMode.SYNC: + return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs) + elif mode == RPCExecMode.ASYNC: + return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait() + elif mode == RPCExecMode.REMOTE: + return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here() + + def _self_py_udf_remote(self, worker_info, x, y, z): + rref = rpc.remote(worker_info, my_function, args=(x, y, z)) + self.assertEqual(rref.to_here(), x + y + z) + + def _self_remote_rref_as_rpc_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x)) + ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y)) + self.assertEqual(ret, x + y + z + x + y) + self.assertEqual(fut.wait(), x + y + z + x) + + def _self_remote_rref_as_remote_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x)) + self.assertEqual( + ret_rref.to_here(), x + y + z + x + ) + + def _world_size_one(self, a, b): + if self.rank == 0: + rpc.init_rpc( + name="me", + backend=self.rpc_backend, + rank=0, + world_size=1, + rpc_backend_options=self.rpc_backend_options, + ) + + def _rpc_sync(x, y): + expect = x * 2 + result = rpc.rpc_sync( + "me", + my_tensor_function, + args=(x, y) + ) + self.assertEqual(expect, result) + + def _rpc_async(x, y): + expect = x * 2 + result = rpc.rpc_async( + "me", + my_tensor_function, + args=(x, 
y) + ).wait() + self.assertEqual(expect, result) + + def _remote(x, y): + expect = x * 2 + result = rpc.remote( + "me", + my_tensor_function, + args=(x, y) + ).to_here() + self.assertEqual(expect, result) + + _rpc_sync(a, b) + _rpc_async(a, b) + _remote(a, b) + + rpc.shutdown() + + def _multi_rpc(self, sparse): + dst_rank = (self.rank + 1) % self.world_size + for i in range(20): + n = i + self.rank + 1 + if sparse: + x = build_sparse_tensor() * n + y = build_sparse_tensor() * n + else: + x = torch.ones(2, 2) + y = torch.ones(2, 2) + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(ret, x * 2) + + def _run_uneven_workload(self, f, x, num_repeat=30): + # worker0 drives and waits for worker1 and worker2 + # throughout the test. + if self.rank == 0: + self.assertTrue(self.world_size >= 3) + + # Phase 1: Only worker1 has workload. + dst = "worker1" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for fut in torch.futures.collect_all(futs).wait(): + self.assertEqual(fut.wait(), 0) + + # Phase 2: Only worker2 has workload. + # If join is not correctly implemented, + # worker2 should be closed by now. + dst = "worker2" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + + def _wait_all_workers(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. + dist.barrier() + rpc.shutdown(graceful=False) + + def _wait_all_workers_twice(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. + dist.barrier() + rpc.shutdown(graceful=False) + + def _nested_rpc(self, f, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + f, + args=(worker_name(self.rank),), + ) + self.assertEqual(ret, expected) + + def _stress_test_rpc(self, f, repeat=1000, args=()): + n = self.rank + 1 + dst_rank = n % self.world_size + futs = [] + tik = time.time() + for _ in range(repeat): + fut = rpc.rpc_async(worker_name(dst_rank), f, args=args) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + tok = time.time() + print( + f"Rank {self.rank} finished testing {repeat} times in {tok - tik} seconds." 
+ ) + + def _builtin_remote_ret(self, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.to_here(), expected) + + def _builtin_remote_self(self, x, y, expected): + rref = rpc.remote( + worker_name(self.rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.local_value(), expected) + + def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}): + m = 10 + n = self.rank + 1 + dst_rank = n % self.world_size + rrefs = [] + expected = [] + for i in range(m): + n = n + i + rrefs.append( + rpc.remote( + worker_name(dst_rank), + fn, + args=args_fn(n, sparse), + kwargs=kwargs_fn(n, sparse), + ) + ) + expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse))) + + for i in range(m): + self.assertEqual(rrefs[i].to_here(), expected[i]) + + def _py_rref_args(self, a, b, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(a, b) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(x, y) + ) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rref_args_user_share(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + owner_rank = n % self.world_size + user_rank = (n + 1) % self.world_size + rref_a = rpc.remote( + worker_name(owner_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + worker_name(owner_rank), my_function, args=(x, y, z) + ) + rref_c = rpc.remote( + worker_name(user_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rpc_rref_args(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + worker_name(dst_rank), my_function, args=(x, y, z) + ) + + c = rpc.rpc_sync( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(c, expected) + + def _nested_remote(self, f, expected): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + + rref = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), expected) + + def _nested_rref(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref_of_rrefs = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + + # Say C has 2 OwnerRRefs. + # B has 2 UserRRefs to those 2 OwnerRRefs, respectively. + # This call is effectively A asking B to share its 2 UserRRefs. 
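+ # (Rough picture, with A = this rank, B = dst_rank1, C = dst_rank2: A holds
+ # an RRef to the tuple created on B; B holds two UserRRefs whose OwnerRRefs
+ # live on C. The to_here() below ships those two UserRRefs back to A,
+ # forking them to this worker.)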
+ rrefs = rref_of_rrefs.to_here() + + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _nested_rref_stress(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + all_rrefs = [] + for _ in range(20): + all_rrefs.append( + rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + ) + + for i in range(20): + rref_of_rrefs = all_rrefs[i] + rrefs = rref_of_rrefs.to_here() + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _trainer_func(self, rref, sparse): + m = MyEmbeddingBagModel(sparse=sparse) + loss_fn = nn.MSELoss() + for i in range(10): + outputs = m(torch.rand(10, 10).long()) + loss_fn(outputs, torch.rand(10, 10)).backward() + gradient = next(iter(m.parameters())).grad + fut = rref.rpc_async().average(rref, i, gradient) + gradient = fut.wait() + if gradient.is_sparse: + gradient = gradient.to_dense().double() + ps_gradient = rref.rpc_sync().get_gradient(rref) + if ps_gradient.is_sparse: + ps_gradient = ps_gradient.to_dense().double() + self.assertTrue(torch.equal(gradient, ps_gradient)) + + def _my_parameter_server(self, sparse): + ps_rref = RRef(MyParameterServer(self.world_size - 1)) + futures = [] + for index in range(1, self.world_size): + futures.append( + rpc.rpc_async( + worker_name((self.rank + index) % self.world_size), + self._trainer_func, + args=( + ps_rref, + sparse + ), + ) + ) + torch.futures.wait_all(futures) + + def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor): + # We check proper CUDA stream synchronization by adding to the tensor + # in one stream to get the expected value, and reading it from another stream. 
+ future = Future(devices=["cuda:0"]) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + another_stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + if sparse_tensor: + tensor = build_sparse_tensor().to("cuda:0") + add_tensor = build_sparse_tensor().to("cuda:0") + expected_tensor = (tensor + add_tensor).coalesce() + else: + tensor = torch.zeros((100,), device="cuda:0") + add_tensor = torch.ones((100,), device="cuda:0") + expected_tensor = tensor + add_tensor + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor += add_tensor + if sparse_tensor: + tensor = tensor.coalesce() + future.set_result(wrapper(tensor)) + with torch.cuda.stream(another_stream): + tensor = unwrapper(future.wait()) + if sparse_tensor: + self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item()) + self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item()) + self.assertEqual(tensor.size(), expected_tensor.size()) + else: + self.assertTrue(torch.eq(tensor, expected_tensor).all().item()) + + +class RpcTest(RpcAgentTestFixture, RpcTestCommon): + @dist_init + def test_worker_id(self): + n = self.rank + 1 + peer_rank = n % self.world_size + self_worker_info = rpc.get_worker_info() + peer_worker_info = rpc.get_worker_info(worker_name(peer_rank)) + + self.assertEqual(self_worker_info.name, worker_name(self.rank)) + self.assertEqual(peer_worker_info.name, worker_name(peer_rank)) + + with self.assertRaisesRegex(RuntimeError, "could not find destination"): + unknown_worker_id = rpc.get_worker_info("WorkerUnknown") + + @dist_init + def test_get_worker_infos(self): + worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos() + + worker_names = {worker_info.name for worker_info in worker_infos} + expected_worker_names = { + worker_name(rank) for rank in range(self.world_size) + } + self.assertEqual(worker_names, expected_worker_names) + + worker_ids = {worker_info.id for worker_info in worker_infos} + expected_worker_ids = set(range(self.world_size)) + self.assertEqual(worker_ids, expected_worker_ids) + + @dist_init + def test_self_add(self): + self_worker_info = rpc.get_worker_info() + self_worker_name = worker_name(self.rank) + fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(fut.wait(), torch.ones(2, 2) + 1) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + @dist_init + def test_send_to_rank(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test dense tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test invalid ranks + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with 
self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + @dist_init + def test_self_py_udf_remote(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_remote_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_rref_proxy_non_exist(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + msg = "has no attribute \'non_exist\'" + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_sync().non_exist() + + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_async().non_exist().wait() + + with self.assertRaisesRegex(AttributeError, msg): + rref.remote().non_exist() + + def _test_rref_proxy_tensor(self, dst): + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + + expected = torch.ones(2, 2) + 1 + 3 + self.assertEqual(expected.size(), rref.rpc_sync().size()) + self.assertEqual(expected + 1, rref.rpc_async().add(1).wait()) + self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here()) + + @dist_init + def test_rref_proxy_tensor(self): + self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_tensor_self(self): + self._test_rref_proxy_tensor(rpc.get_worker_info()) + + @dist_init + def test_rref_proxy_reuse(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + my_function, + args=(torch.ones(2, 2), 1, 3) + ) + expected = torch.ones(2, 2) + 1 + 3 + + proxy_rpc_sync = rref.rpc_sync() + proxy_rpc_async = rref.rpc_async() + proxy_remote = rref.remote() + + self.assertEqual(expected.size(), proxy_rpc_sync.size()) + self.assertEqual(expected + 1, proxy_rpc_sync.add(1)) + self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4)) + + self.assertEqual(expected.size(), proxy_rpc_async.size().wait()) + self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait()) + self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait()) + + self.assertEqual(expected.size(), proxy_remote.size().to_here()) + self.assertEqual(expected + 5, proxy_remote.add(5).to_here()) + self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here()) + + def _test_rref_proxy_class(self, dst): + rref = rpc.remote(dst, MyClass, args=(7,)) + expected = MyClass(7) + self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + expected.increment_value(3) + self.assertEqual(None, rref.rpc_sync().increment_value(1)) + self.assertEqual(None, rref.rpc_async().increment_value(1).wait()) + self.assertEqual(None, rref.remote().increment_value(1).to_here()) + + 
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + self.assertEqual( + expected.my_instance_method(2), + rref.rpc_sync().my_instance_method(2) + ) + self.assertEqual( + expected.my_instance_method(3), + rref.rpc_async().my_instance_method(3).wait() + ) + self.assertEqual( + expected.my_instance_method(4), + rref.remote().my_instance_method(4).to_here() + ) + + self.assertEqual( + expected.my_static_method(9), + rref.rpc_sync().my_static_method(9) + ) + self.assertEqual( + expected.my_static_method(10), + rref.rpc_async().my_static_method(10).wait() + ) + self.assertEqual( + expected.my_static_method(11), + rref.remote().my_static_method(11).to_here() + ) + + self.assertEqual( + expected.my_class_method(2, torch.zeros(2, 2)), + rref.rpc_sync().my_class_method(2, torch.zeros(2, 2)) + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(3, 3)), + rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait() + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(4, 4)), + rref.remote().my_class_method(2, torch.ones(4, 4)).to_here() + ) + + @dist_init + def test_rref_proxy_class(self): + self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_class_self(self): + self._test_rref_proxy_class(rpc.get_worker_info()) + + @mock.patch.object(torch.distributed.autograd, "_init") + @mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent") + @dist_init(setup_rpc=False) + def test_register_rpc_backend_and_set_and_start_rpc_backend( + self, mock_rpc_agent, mock_dist_autograd_init + ): + backend_name = "stub_backend" + + backend = rpc.backend_registry.register_backend( + backend_name, + _stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + with self.assertRaisesRegex( + RuntimeError, "^RPC backend .+: already registered$" + ): + backend = rpc.backend_registry.register_backend( + backend_name, + _stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + rpc.init_rpc( + name="worker1", + backend=backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + store, _, _ = next( + torch.distributed.rendezvous( + self.init_method, rank=self.rank, world_size=self.world_size + ) + ) + rpc._init_rpc_backend( + backend=self.rpc_backend, + store=store, + name="duplicate_name", + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name_2(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + rpc.init_rpc( + name=worker_name(self.rank % (self.world_size - 1)), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_reinit(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. 
+ dist.barrier() + + # TODO: with TCP init, rank 0 raises Address already in use because + # rank 0 is the start daemon and the store is created before checking if + # RPC is already initialized in init_rpc. + if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0: + expected_reinit_err = "Address already in use" + else: + expected_reinit_err = "is already initialized" + + with self.assertRaisesRegex(RuntimeError, expected_reinit_err): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_pg_init_no_rpc_init(self): + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + class MyModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.lin = torch.nn.Linear(3, 4) + + def forward(self, x): + return self.lin(x) + + model = MyModel() + model.train() + model = torch.nn.parallel.DistributedDataParallel(model) + + with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'): + params = [] + for param in model.parameters(): + params.append(RRef(param)) + + def test_world_size_one(self): + self._world_size_one( + torch.ones(2, 2), + torch.ones(2, 2) + ) + + @dist_init(setup_rpc=False) + def test_invalid_names(self): + + worker_id = 0 + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo("abc*", worker_id) + + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo(" ", worker_id) + + with self.assertRaisesRegex(RuntimeError, "must be non-empty"): + info = WorkerInfo("", worker_id) + + # If the number in the message does not match, it is likely that the + # value of MAX_NAME_LEN in RPC WorkerInfo has changed. 
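        # Editorial note: the 500-character name below should exceed the limit
        # (presumably MAX_NAME_LEN == 128, given the "shorter than 128" message
        # being matched), so constructing this WorkerInfo is expected to fail.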
+ with self.assertRaisesRegex(RuntimeError, "shorter than 128"): + info = WorkerInfo("".join(["a" for i in range(500)]), worker_id) + + # Test that WorkerInfo can be pickled and sent in RPC call + @dist_init + def test_worker_info_pickle(self): + dst_rank = (self.rank + 1) % self.world_size + worker_info = rpc.api.get_worker_info() + ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,)) + self.assertEqual(ret, worker_info) + + @dist_init + def test_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @staticmethod + def return_callee_id(): + return rpc.get_worker_info().id + + @dist_init + def test_int_callee(self): + dst_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id) + self.assertEqual(ret, dst_rank) + + @dist_init + def test_add_with_id(self): + n = self.rank + 1 + dst_rank = n % self.world_size + workder_info = rpc.get_worker_info(worker_name(dst_rank)) + + ret = rpc.rpc_sync( + workder_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n)) + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @dist_init + def test_scalar_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n) + ) + self.assertEqual(ret, (torch.ones(n, n) + n)) + + @dist_init + def test_async_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_nonzero(self): + n = self.rank + 1 + dst_rank = n % self.world_size + x = torch.ones(self.world_size, self.world_size) + x[self.rank][self.rank] = 0 + ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,)) + self.assertEqual(ret, x.nonzero()) + + @dist_init + def test_multi_rpc(self): + self._multi_rpc(False) + + @dist_init + def test_future_wait_twice(self): + dst = worker_name((self.rank + 1) % self.world_size) + futs = [] + for i in range(20): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + torch.futures.wait_all(futs) + + for fut in futs: + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + @dist_init(setup_rpc=False) + def test_wait_all_workers_timeout(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + og_func = rpc.api._wait_all_workers + + def wait_all_workers_sleep(timeout): + rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout) + + rpc.api._wait_all_workers = wait_all_workers_sleep + + try: + with self.assertRaisesRegex(RuntimeError, ''): + rpc.shutdown(graceful=True, timeout=0.01) + finally: + rpc.api._wait_all_workers = og_func + dist.barrier() + + def test_wait_all_workers_dense(self): + self._wait_all_workers(heavy_rpc, torch.ones(100, 100)) + + def test_wait_all_workers_twice_dense(self): + self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100)) + + @dist_init + def test_all_gather(self): + info = rpc.get_worker_info() + results = rpc.api._all_gather(info.id) + expected = {} + for info in 
rpc._get_current_rpc_agent().get_worker_infos(): + expected[info.name] = info.id + + self.assertEqual(expected, results) + + @dist_init + def test_all_gather_timeout(self): + rpc._set_rpc_timeout(0.1) + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, + "timed out in _all_gather after 0\\.10 seconds" + ): + rpc.api._all_gather(SlowPickleClass(0.5)) + else: + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.api._all_gather(SlowPickleClass(0.5)) + + def _test_barrier_helper(self, info, names, multi_threaded=False): + names = sorted(names) + leader = names[0] + rpc.rpc_sync(leader, _reset_count) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, 0) + rpc.api._barrier(names) + rpc.rpc_sync(leader, _increment_count) + rpc.api._barrier(names) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, len(names)) + + @dist_init + def test_rpc_barrier_all(self): + # Test rpc barrier when called with full list of workers + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in all_worker_info] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_subset(self): + # Test rpc barrier when processes are called with different subsets of the full list + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [worker.name for worker in all_worker_info if not worker.id % 2] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_partial_subset(self): + # Test rpc barrier when some processes are not involved in the barrier + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [f"worker{info.id}"] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_multithreaded(self): + # This tests validates the implementation of barrier when multiple threads call into it + # We only need to check that it does not hang in this case + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in all_worker_info] + threads = [] + for _ in range(3): + th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True)) + threads.append(th) + th.start() + for th in threads: + th.join() + + @dist_init + def test_graceful_shutdown_with_uneven_workload(self): + """Test graceful termination.""" + self._run_uneven_workload(heavy_rpc, torch.ones(100, 100)) + + @dist_init(setup_rpc=False) + def test_shutdown_followed_by_rpc(self): + # Initialize RPC. 
+ rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + rpc.shutdown() + + with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"): + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + + @dist_init + def test_expected_src(self): + dst_rank = (self.rank + 1) % self.world_size + expected_src_rank = (self.rank - 1) % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,)) + value = VALUE_FUTURE.result() + self.assertEqual(value, expected_src_rank) + + @dist_init + def test_py_built_in(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2)) + self.assertEqual(ret, min(n, n + 1, n + 2)) + + @dist_init + def test_py_user_defined(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(ret, my_function(n, n + 1, n + 2)) + + def test_build_rpc_profiling_key(self): + # Tests that the name that shows up as an Event in profiling RPCs has all + # the necessary information. + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + rpc_profiling_key = _build_rpc_profiling_key( + exec_mode, "foo", "worker0", "worker1" + ) + self.assertIn(exec_mode.value, rpc_profiling_key) + self.assertIn("foo", rpc_profiling_key) + self.assertIn("worker0", rpc_profiling_key) + self.assertIn("worker1", rpc_profiling_key) + + def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode): + self.assertTrue(self_worker_name in rpc_event.name) + self.assertTrue(dst_worker_name in rpc_event.name) + if isinstance(func, torch.jit.ScriptFunction): + self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name) + else: + self.assertTrue(func.__name__ in rpc_event.name) + self.assertTrue(rpc_exec_mode.value in rpc_event.name) + self.assertEqual(rpc_event.count, 1) + + @dist_init + def test_profiler_rpc_record_shapes(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + t1, t2 = torch.ones(100), torch.ones(100) + with _profile(record_shapes=True) as prof: + rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2)) + + function_events = prof.function_events + remote_events = [event for event in function_events if event.is_remote] + remote_add_event = next( + event for event in remote_events if "aten::add" in event.name + ) + remote_add_input_shapes = remote_add_event.input_shapes + # Run profiler on equivalent local op and validate shapes are the same. 
+ with _profile(record_shapes=True) as prof: + torch.add(t1, t2) + + local_function_events = prof.function_events + local_add_event = next( + event for event in local_function_events if "aten::add" in event.name + ) + local_add_input_shapes = local_add_event.input_shapes + self.assertEqual(remote_add_input_shapes, local_add_input_shapes) + + @dist_init + def test_profiler_rpc_memory(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile(profile_memory=True) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + # if cpu_memory_usage was not propagated over the wire, this set would + # only contain 0 (indicates no memory being profiled) + self.assertNotEqual({0}, event_cpu_mem_usages) + # No memory profiled if profile_memory=False + with _profile(profile_memory=False) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + self.assertEqual({0}, event_cpu_mem_usages) + + @dist_init + def test_profiler_export_trace(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile() as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + events = p.function_events + with TemporaryFileName() as fname: + path = fname + p.export_chrome_trace(path) + with open(path) as f: + trace = json.load(f) + event_names = [event['name'] for event in trace] + for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]: + event_exists = any(expected_event_name in event_name for event_name in event_names) + self.assertTrue(event_exists) + + @dist_init + def test_profiler_rpc_key_names(self): + # tests that remote events are properly prefixed with the RPC profiling key. + if self.rank != 1: + return + + # Spawn multiple threads that send RPCs to ensure keys are correctly + # prefixed when there are multiple RPCs being created/in flight at the + # same time. + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + + def rpc_with_profiling(dst_worker): + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + fut.wait() + + events = prof.function_events + remote_event_names = { + event.name: event for event in events if event.is_remote + } + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + dst_worker, + ) + + remote_event_name_set = set(EXPECTED_REMOTE_EVENTS) + for name, event in remote_event_names.items(): + # Ensure that we have the expected key as part of the remote + # event. + self.assertTrue(name.startswith(rpc_profiling_key)) + self.assertTrue(event.is_remote) + self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id) + # Ensure that the remote event name also contains the operator. + operator_name_substr = name[len(rpc_profiling_key) :] + # Note: we don't assert that every remote event needs to be + # in the above set, the set is just a representative set of + # what we expect to see. The profiler can change and add more + # events, but we should always expect to see this representative + # set. 
+ matching_event = { + remote_event_name + for remote_event_name in remote_event_name_set + if remote_event_name in operator_name_substr + } + remote_event_name_set -= matching_event + + # The set should be empty, otherwise its contained elements did + # not show up in the remote profiler output. + self.assertTrue( + remote_event_name_set == set(), + f"Expected {remote_event_name_set} to be included in remote profiler output.", + ) + + for dst in dst_ranks: + dst_worker = worker_name(dst) + num_parallel_rpcs = 2 + with concurrent.futures.ThreadPoolExecutor( + max_workers=num_parallel_rpcs + ) as executor: + futs = [ + executor.submit(rpc_with_profiling, dst_worker) + for _ in range(num_parallel_rpcs) + ] + # Wait for workers to finish test + for fut in futs: + fut.result() + + def _run_test_profiler_remote_events_profiled(self): + # Tests that we can successfully invoke the profiler on a remote node, + # and collect the remote events back in the local profiler. + if self.rank != 1: + return + + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + for dst in dst_ranks: + dst_worker = worker_name(dst) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + ret = fut.wait() + + events = prof.function_events + + rpc_event = get_function_event(events, RPCExecMode.ASYNC.value) + self.check_profiling_info( + worker_name(self.rank), + dst_worker, + udf_with_torch_ops, + rpc_event, + RPCExecMode.ASYNC, + ) + + remote_events = {event.name: event for event in events if event.is_remote} + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + worker_name(dst), + ) + + for expected_remote_event_name in EXPECTED_REMOTE_EVENTS: + expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name + self.assertTrue(expected_key in remote_events) + remote_event = remote_events[expected_key] + # Remote event should have a node ID corresponding to the worker + # it ran on. + self.assertEqual(remote_event.node_id, dst) + + # Validate order remote events show up in profiling output. 
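        # Editorial note: remote event names have the form
        # "<rpc_profiling_key>#remote_op: <local op name>"; the helper below strips
        # everything up to and including the REMOTE_OP_STR marker so the remaining
        # suffixes can be compared against EXPECTED_REMOTE_EVENTS.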
+ def convert_remote_to_local(event_name): + remote_op_key = rpc_profiling_key + REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + + len(remote_op_key) : + ] + + remote_events_list = [ + convert_remote_to_local(event.name) + for event in events + if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS + ] + self.assertEqual( + set(remote_events_list), + set(EXPECTED_REMOTE_EVENTS), + f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}", + ) + + @dist_init + def test_profiler_remote_events_profiled(self): + self._run_test_profiler_remote_events_profiled() + + @dist_init + def test_profiler_remote_events_profiled_single_threaded(self): + self._run_test_profiler_remote_events_profiled() + + def run_profiling_workload(self, dst): + fut = rpc.rpc_async( + worker_name(dst), + torch.mul, + args=( + torch.tensor(1.0, requires_grad=True), + torch.tensor(1.0, requires_grad=True), + ), + ) + fut.wait() + + def _run_rpc_profiling_async_function(self, device="cpu"): + if self.rank != 1: + return + + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + with _profile() as prof: + ret = rpc.rpc_async( + dst1, slow_async_add, args=(dst2, x, y, device), timeout=20 + ) + out = ret.wait() + + function_events = prof.function_events + # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be + # recorded. + key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1 + ) + + nested_rpc_key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2 + ) + expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix + remote_events = [event for event in function_events if event.is_remote] + rpc_remote_event = [ + event for event in remote_events if event.name == expected_key + ] + self.assertEqual(1, len(rpc_remote_event)) + rpc_remote_event = rpc_remote_event[0] + self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size) + # slow_async_add's RPC does an add on dst2, which should be reflected as well. + remote_add_key = ( + expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add) + ) + remote_add_event = [ + event for event in remote_events if event.name == remote_add_key + ] + self.assertEqual(1, len(remote_add_event)) + remote_add_event = remote_add_event[0] + # Validate that node_id is dst2. + self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size) + + @dist_init + def test_rpc_profiling_async_function(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_async_function_single_threaded(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_remote_record_function(self): + # test that functions run over RPC with record_function show the expected + # profiled block. 
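        # Editorial note (assumption about the helper): udf_with_torch_ops is
        # expected to wrap its work in a profiler scope roughly like
        #
        #     with torch.autograd.profiler.record_function("##forward##"):
        #         ...  # torch ops
        #
        # which is why the assertions below look for a "##forward##" event among
        # the remote profiler events.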
+ if self.rank != 1: + return + dst_ranks = [i for i in range(self.world_size) if i != self.rank] + for dst_rank in dst_ranks: + dst_worker = worker_name(dst_rank) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True)) + fut.wait() + + function_events = prof.function_events + record_function_remote_event = [ + evt for evt in function_events if "##forward##" in evt.name + ] + self.assertEqual(1, len(record_function_remote_event)) + record_function_remote_event = record_function_remote_event[0] + self.assertEqual(record_function_remote_event.node_id, dst_rank) + # cpu_children only returns direct children, so here we get all + # children recursively. + + def get_cpu_children(event): + if not event.cpu_children: + return [] + cpu_children = event.cpu_children + for e in event.cpu_children: + cpu_children.extend(get_cpu_children(e)) + return cpu_children + + remote_children = get_cpu_children(record_function_remote_event) + # Get local children and verify parity. + with _profile() as prof: + udf_with_torch_ops(-1, True) + + local_function_events = prof.function_events + local_record_function_event = next( + evt for evt in local_function_events if "##forward##" in evt.name + ) + local_children = get_cpu_children(local_record_function_event) + local_children_names = [ + evt.name for evt in local_children + ] + + REMOTE_OP_STR = "#remote_op: " + + def convert_remote_to_local(event_name): + remote_op_key = REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + len(remote_op_key) : + ] + + for evt in remote_children: + local_name = convert_remote_to_local(evt.name) + self.assertTrue(local_name in local_children_names) + + def validate_profiling_workload(self, dst, prof): + + def convert_remote_to_local(event_name): + return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :] + + events = prof.function_events + remote_events = { + convert_remote_to_local(event.name): event + for event in events + if event.is_remote + } + self.assertTrue("aten::mul" in remote_events) + remote_mul_event = remote_events["aten::mul"] + self.assertEqual(remote_mul_event.node_id, dst) + self.check_profiling_info( + worker_name(self.rank), + worker_name(dst), + torch.mul, + remote_mul_event, + RPCExecMode.ASYNC, + ) + + def _run_test_profiler_with_autograd_context(self): + dst = (self.rank + 1) % self.world_size + if self.rank == 1: + # Cases where we can double wrap messages with profiling information and autograd info. + with dist_autograd.context() as context_id: + with _profile() as prof: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + # Ensure that flipped order of ctx managers results in events being + # recorded as expected. + with _profile() as prof: + with dist_autograd.context() as context_id: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + @dist_init + def test_profiler_with_autograd_context_single_threaded(self): + self._run_test_profiler_with_autograd_context() + + @dist_init + def test_profiler_with_autograd_context(self): + self._run_test_profiler_with_autograd_context() + + def _profiler_test_with_rpc( + self, rpc_exec_mode, func, args, use_record_function=False, dst=None, kineto_profile=False + ): + dst = dst if dst is not None else (self.rank + 1) % self.world_size + + # only run profiler on rank 1. 
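        # Editorial note: _profile here is the legacy autograd profiler, which is
        # the only path integrated with RPC profiling; torch.profiler.profile is
        # the kineto profiler, for which RPC profiling is unsupported, so the
        # kineto_profile branch further down asserts that no RPC events appear.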
+ p = _profile if not kineto_profile else torch.profiler.profile # kineto + if self.rank == 1: + with p() as prof: + record_function_ctx_mgr = ( + contextlib.nullcontext() + if not use_record_function + else torch.autograd.profiler.record_function( + "foo" + ) + ) + with record_function_ctx_mgr as rf: + if rpc_exec_mode == RPCExecMode.SYNC: + rpc.rpc_sync(worker_name(dst), func, args=args) + elif rpc_exec_mode == RPCExecMode.ASYNC: + fut = rpc.rpc_async(worker_name(dst), func, args=args) + if kineto_profile: + # Ensure multiple async RPCs don't cause issues. + # Would have raised + # "RuntimeError: Cannot call + # RemoteProfilerManager::setCurrentKey when current + # key is already set." error if RPC profiling was + # not disabled properly for kineto. + fut2 = rpc.rpc_async(worker_name(dst), func, args=args) + fut2.wait() + fut.wait() + else: + self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE) + rref = rpc.remote(worker_name(dst), func, args=args) + rref.to_here() + # To avoid flakiness, wait for the RRef to be profiled. This + # means that we received the acknowledgement of successful + # creation on the owner and ran the callbacks responsible + # for recording the profiling event. + rref._get_profiling_future().wait() + + events = prof.function_events if not kineto_profile else prof.events() + if kineto_profile: + # RPC profiling is disabled so there should be no rpc related + # events. + with self.assertRaises(IndexError): + get_function_event(events, rpc_exec_mode.value) + + return + + rpc_event = get_function_event(events, rpc_exec_mode.value) + # verify Node ID for this rpc event. + self.assertEqual(rpc_event.node_id, self.rank) + # Ensure recording of remote events. + remote_events = {event for event in events if event.node_id == dst} - {rpc_event} + self.assertGreaterEqual(len(remote_events), 1) + for remote_event in remote_events: + self.assertEqual(remote_event.node_id, dst) + + if use_record_function: + scope_event = get_function_event(events, "foo") + # Since RPC call is within the scope, its CPU interval should be + # contained within foo's interval. + self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start) + self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end) + # the sender, dest worker, function run, and type of RPC should all + # be recorded. + self_worker_name = worker_name(self.rank) + dst_worker_name = worker_name(dst) + self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode) + if use_record_function: + # verify order by ensuring that the outer context comes + # before the rpc event. 
+ foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name) + rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name) + self.assertLess(foo_event_ix, rpc_event_idx) + + def _run_test_profiler_with_sync_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,), + use_record_function=True) + + @dist_init + def test_profiler_with_sync_rpc_udf(self): + self._run_test_profiler_with_sync_rpc_udf() + + @dist_init + def test_profiler_with_sync_rpc_udf_single_threaded(self): + self._run_test_profiler_with_sync_rpc_udf() + + def _run_test_profiler_with_sync_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_sync_rpc_builtin(self): + self._run_test_profiler_with_sync_rpc_builtin() + + @dist_init + def test_profiler_with_sync_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_sync_rpc_builtin() + + def _run_test_profiler_with_async_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,), + use_record_function=True) + # Test to ensure that kineto profiler enabled in RPC does not enable + # RPC profiling (it is unsupported) and does not result in issues. + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_sleep_func, args=(1,), kineto_profile=True + ) + + @dist_init + def test_profiler_with_async_rpc_udf(self): + self._run_test_profiler_with_async_rpc_udf() + + @dist_init + def test_profiler_with_async_rpc_udf_single_threaded(self): + self._run_test_profiler_with_async_rpc_udf() + + def _run_test_profiler_with_async_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_async_rpc_builtin(self): + self._run_test_profiler_with_async_rpc_builtin() + + @dist_init + def test_profiler_with_async_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_async_rpc_builtin() + + def _run_test_profiler_with_remote_udf(self): + self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank + ) + + @dist_init + def test_profiler_with_remote_udf(self): + self._run_test_profiler_with_remote_udf() + + @dist_init + def test_profiler_with_remote_udf_single_threaded(self): + self._run_test_profiler_with_remote_udf() + + def _run_test_profiler_with_remote_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + torch.mul, + args=(torch.ones(1), torch.ones(1)), + dst=self.rank, + ) + + @dist_init + def test_profiler_with_remote_builtin(self): + 
self._run_test_profiler_with_remote_builtin() + + @dist_init + def test_profiler_with_remote_builtin_single_threaded(self): + self._run_test_profiler_with_remote_builtin() + + def _run_test_profiler_with_script_async_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_async_rpc(self): + self._run_test_profiler_with_script_async_rpc() + + @dist_init + def test_profiler_with_script_async_rpc_single_threaded(self): + self._run_test_profiler_with_script_async_rpc() + + def _run_test_profiler_with_script_sync_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_sync_rpc(self): + self._run_test_profiler_with_script_sync_rpc() + + @dist_init + def test_profiler_with_script_sync_rpc_single_threaded(self): + self._run_test_profiler_with_script_sync_rpc() + + def _run_test_profiler_with_script_remote_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank + ) + + @dist_init + def test_profiler_with_script_remote_rpc(self): + self._run_test_profiler_with_script_remote_rpc() + + @dist_init + def test_profiler_with_script_remote_rpc_single_threaded(self): + self._run_test_profiler_with_script_remote_rpc() + + def _assert_top_level_events(self, process_global_events, expected_top_level_event_names): + top_level_event_names = [] + for thread_local_events in process_global_events: + # Get top-level events from all events happened on a thread. 
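        # Editorial note: an event is treated as top level when it starts after the
        # previously accepted top-level event has ended; events whose interval is
        # nested inside an earlier one are skipped, assuming each thread's events
        # are ordered by start time.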
+ last_end_time = 0 + for event in thread_local_events: + event_name = event.name + time_range = event.time_range + if time_range.start > last_end_time: + top_level_event_names.append(event_name) + last_end_time = time_range.end + top_level_event_names = sorted(top_level_event_names) + expected_top_level_event_names = sorted(expected_top_level_event_names) + self.assertEqual( + top_level_event_names, + expected_top_level_event_names, + f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}", + ) + + @dist_init + def test_server_process_global_profiler(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + + x = torch.tensor(1) + y = torch.tensor(2) + + outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + outer_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.add, (x, y)) + inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + inner_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.sub, (x, y)) + inner_profile_rref.rpc_sync().__exit__(None, None, None) + outer_profile_rref.rpc_sync().__exit__(None, None, None) + + inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,)) + expected_inner_events = ['aten::sub'] + expected_outer_events = expected_inner_events + ['aten::add'] + + self._assert_top_level_events(inner_events, expected_inner_events) + outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,)) + self._assert_top_level_events(outer_events, expected_outer_events) + + inner_profile_rref.rpc_sync().key_averages() + outer_profile_rref.rpc_sync().key_averages() + + @dist_init + def test_async_record_function_double_end_callbacks(self): + num_sleep_seconds = 1 + if self.rank == 1: + # Validate that calling the function twice results in an error. + with _profile() as pf: + with torch.autograd.profiler.record_function("foo") as rf: + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + rf._call_end_callbacks_on_future(fut) + with self.assertRaisesRegex( + RuntimeError, "can only be called once." + ): + rf._call_end_callbacks_on_future(fut) + fut.wait() + + @dist_init + def test_async_record_function_legacy(self): + # Test the legacy _record_function ops work + # Note: These exist for backward compatibility with TorchScript + num_sleep_seconds = 1 + if self.rank == 1: + with _profile() as pf: + try: + handle = torch.ops.profiler._record_function_enter("foo", None) + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut) + finally: + torch.ops.profiler._record_function_exit(handle) + + fut.wait() + + @dist_init + def test_async_record_function_cbs_jit_call(self): + if self.rank == 1: + with _profile() as pf: + key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + torch._jit_internal._qualified_name(my_script_func), + "worker1", + "worker0", + ) + with torch.autograd.profiler.record_function(key) as rf: + fut = rpc.rpc_async( + worker_name(0), my_script_func, args=(torch.tensor(1),) + ) + # Intentionally calling record_function internals + fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.record, fut) + result = fut.wait() + # Validate that the profiling future returns the same value as the RPC + # future. 
+ expected = torch.add(torch.tensor(1), torch.tensor(1)) + self.assertEqual(result, expected) + events = pf.function_events + rpc_event = get_function_event( + events, torch._jit_internal._qualified_name(my_script_func) + ) + self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name) + + @dist_init + def test_py_class_constructor(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,)) + self.assertEqual(ret.a, n) + + @dist_init + def test_py_class_instance_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,) + ) + self.assertEqual(ret, MyClass(2).my_instance_method(n)) + + @dist_init + def test_py_class_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1) + ) + self.assertEqual(ret, MyClass.my_class_method(n, n + 1)) + + @dist_init + def test_py_class_static_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,) + ) + self.assertEqual(ret, MyClass.my_static_method(n + 10)) + + @dist_init + def test_py_multi_async_call(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker_info = rpc.get_worker_info(worker_name(dst_rank)) + fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,)) + fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2)) + self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10)) + self.assertEqual(fut2.wait(), min(n, n + 1, n + 2)) + + @dist_init + def test_py_no_return_result(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), no_result) + self.assertEqual(ret, no_result()) + + @dist_init + def test_py_tensors(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n))) + + @dist_init + def test_py_tensors_multi_async_call(self): + futs = [] + n = self.rank + 1 + dst_rank = n % self.world_size + for i in range(100): + fut = rpc.rpc_async( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(i, i), torch.ones(i, i)), + ) + futs.append(fut) + + j = 0 + for val in torch.futures.wait_all(futs): + self.assertEqual( + val, my_tensor_function(torch.ones(j, j), torch.ones(j, j)) + ) + j += 1 + + @dist_init + def test_py_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [torch.ones(n, n), torch.ones(n, n)] + b = TensorClass(build_complex_tensors()) + c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)} + ret = rpc.rpc_sync( + worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c) + ) + self.assertEqual(ret, my_complex_tensor_function(a, b, c)) + + @dist_init + def test_py_nested_pickle(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + ret = rpc.rpc_sync( + worker_name(dst_rank), + run_nested_pickle, + args=(MyPickleClass(), torch.ones(2, 2)), + ) + + m = MyPickleClass() + m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2))) + self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2))) + + @dist_init + def test_py_function_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + with 
self.assertRaises(TypeError): + ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,)) + + @dist_init + def test_py_raise_in_user_func(self): + with captured_output() as (_, err): + # This barrier prevents a race condition where the main thread has + # not entered the context manager when the remote function runs. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func) + with self.assertRaisesRegex(ValueError, expected_err): + fut.wait() + # This barrier prevents a race condition where the main thread exits + # context manager before the remote function has ran. + dist.barrier() + + # Validate that trainers log errors when running functions. + stderr_lines = err.getvalue() + self.assertTrue(expected_err in stderr_lines) + + @dist_init + def test_py_raise_in_user_func_escaped_str(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape) + try: + fut.wait() + except ValueError as e: + msg = str(e) + # Ensure newlines are unescaped to provide a better repr of error. + self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape")) + else: + self.assertTrue(False, "expected raise_func_escape to raise ValueError.") + + @dist_init + def test_nested_rpc(self): + self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1) + + @dist_init + def test_stress_light_rpc(self): + self._stress_test_rpc(light_rpc) + + @dist_init + def test_stress_heavy_rpc(self): + self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_stress_heavy_rpc_torchscript(self): + self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_builtin_remote_ret(self): + self._builtin_remote_ret( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @dist_init + def test_builtin_remote_self(self): + self._builtin_remote_self( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @staticmethod + def _multi_args_fn(n, sparse=False): + if sparse: + return (build_sparse_tensor(), build_sparse_tensor()) + else: + return (torch.ones(n, n), torch.ones(n, n)) + + @dist_init + def test_multi_builtin_remote_ret(self): + self._test_multi_remote_call( + torch.add, False, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_py_udf_remote(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2)) + + @staticmethod + def _multi_kwargs_fn(n, sparse=False): + if sparse: + return { + "a": build_sparse_tensor(), + "b": build_sparse_tensor(), + "c": build_sparse_tensor() + } + else: + return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)} + + @dist_init + def test_multi_py_udf_remote(self): + self._test_multi_remote_call( + my_function, + False, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args(self): + self._py_rref_args( + torch.ones(2, 2), + 1, + torch.ones(2, 2), + 2, + torch.ones(2, 2) * 2 + 3) + + @dist_init + def test_py_rref_args_user_share(self): + self._py_rref_args_user_share( + torch.ones(2, 2), + 1, + 2, + torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_py_rpc_rref_args(self): + self._py_rpc_rref_args( + torch.ones(2, 2), + 1, + 2, + 
torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_nested_remote(self): + self._nested_remote( + nested_remote, + torch.ones(2, 2) + 3 + ) + + @dist_init + def test_nested_rref(self): + self._nested_rref( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_nested_rref_stress(self): + self._nested_rref_stress( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_multi_layer_nested_async_rpc(self): + # This test will exit right away, but there will be a chain of async + # RPCs. The termination algorithm should detect those messages properly. + # Otherwise, some peer could exit early, leaving others to timeout + # errors or connection closed errors. + ttl = 20 + n = self.rank + 1 + dst_rank = n % self.world_size + + multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl) + + @dist_init + def test_remote_with_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + # check ref to other workers + rref = rpc.remote(worker_name(dst_rank), raise_func) + with self.assertRaises(ValueError): + rref.to_here() + # check ref to itself + rref = rpc.remote(worker_name(self.rank), no_result, args=(10,)) + with self.assertRaises(TypeError): + rref.to_here() + + @dist_init + def test_rpc_return_rref(self): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref = rpc.rpc_sync( + worker_name(dst_rank1), + rpc_return_rref, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1) + + @dist_init + def test_rref_forward_chain(self): + ttl = 8 + n = self.rank + 1 + dst_rank = n % self.world_size + + rref = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + ) + + ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl) + + for i in range(ttl): + self.assertEqual(len(ret_rref), 1) + ret_rref = ret_rref[0].to_here() + + ret = ret_rref + self.assertEqual(ret, torch.add(torch.ones(n, n), 1)) + + @dist_init + def test_local_rref_no_fork(self): + local_rref = RRef(35) + self.assertEqual(local_rref.local_value(), 35) + + @dist_init + def test_local_value_not_on_owner(self): + # ensure that an error message is thrown if a user tries to call + # local_value() on a non-owning node. + next_rank = (self.rank + 1) % self.world_size + rref = rpc.remote( + worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + with self.assertRaisesRegex( + RuntimeError, ( + fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), " + fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), " + r"can't call localValue\(\) on user " + fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). 
" + fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)" + ) + ): + rref.local_value() + + @dist_init + def test_return_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + rref_list = rpc.rpc_sync( + worker_name(dst_rank), get_rref_list, args=([1, 2, 3],) + ) + + for rref in rref_list: + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, 10), + ) + + rets = [ + rpc.rpc_sync( + rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref) + ) + for rref in rref_list + ] + + self.assertEqual(rets, [11, 12, 13]) + + @dist_init + def _test_rref_type(self, blocking): + + def launched_rpc(events): + expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner" + return any(e.name.startswith(expected_name) for e in events) + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1)) + + with _profile() as p: + t = rref._get_type(blocking=blocking) + if not blocking: + t = t.wait() + + self.assertTrue(launched_rpc(p.function_events)) + expected_type = type(torch.ones(2)) + self.assertEqual(t, expected_type) + + futs = [] + + def verify(fut): + self.assertEqual(fut.value(), expected_type) + + with _profile() as p: + for _ in range(10): + t = rref._get_type(blocking=blocking) + if not blocking: + futs.append(t) + t.add_done_callback(verify) + t = t.wait() + self.assertEqual(t, expected_type) + + if not blocking: + # Note that cached calls with blocking=False all return the same + # cached original future. + first_fut = futs[0] + for f in futs[1:]: + self.assertTrue(f is first_fut) + # Ensure we never launch another RPC, other than for the very + # first call. + self.assertFalse(launched_rpc(p.function_events)) + self.assertEqual(t, type(torch.ones(2))) + + rref = rpc.remote(dst, MyClass, args=(0,)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_blocking(self): + self._test_rref_type(blocking=True) + + def test_rref_type_non_blocking(self): + self._test_rref_type(blocking=False) + + @dist_init + def _test_rref_type_with_error(self, blocking): + dst = worker_name((self.rank + 1) % self.world_size) + # 10 ms timeout + rref = rpc.remote(dst, raise_func) + # Blocking: error raised inline + if blocking: + with self.assertRaisesRegex(ValueError, "Expected error"): + rref._get_type(blocking=blocking) + else: + # Non-blocking: Immediately return future, block on wait + fut = rref._get_type(blocking=blocking) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + + def test_rref_type_with_error_blocking(self): + self._test_rref_type_with_error(blocking=True) + + def test_rref_type_with_error_non_blocking(self): + self._test_rref_type_with_error(blocking=False) + + @dist_init + def _test_rref_type_owner(self, blocking): + rref = RRef(torch.ones(2) + 1) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, type(torch.ones(2))) + + rref = RRef(MyClass(0)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_owner_blocking(self): + self._test_rref_type_owner(blocking=True) + + def test_rref_type_owner_non_blocking(self): + self._test_rref_type_owner(blocking=False) + + @staticmethod + def _slow_add(x, y): + time.sleep(1) + return x + y + + 
@dist_init + def test_rref_type_slow_init(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1)) + self.assertEqual(rref._get_type(), type(torch.ones(2))) + + @dist_init + def test_owner_equality(self): + a = RRef(40) + b = RRef(50) + + other_rank = (self.rank + 1) % self.world_size + other_a = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_b = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_a.to_here() # to ensure clean termination + other_b.to_here() + + self.assertNotEqual(a.owner(), 23) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertNotEqual(a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertEqual(a.owner(), a.owner()) + self.assertEqual(a.owner(), b.owner()) + self.assertEqual(a.owner(), rpc.get_worker_info()) + x = {} + x[a.owner()] = a + x[other_a.owner()] = other_a + self.assertEqual(x[a.owner()], a) + self.assertEqual(x[b.owner()], a) + self.assertEqual(x[other_a.owner()], other_a) + self.assertEqual(x[other_b.owner()], other_a) + self.assertEqual(len(x), 2) + + @dist_init + def test_pass_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker = worker_name(dst_rank) + + rref = RRef(40) + self.assertEqual( + rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90 + ) + self.assertEqual( + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90 + ) + self.assertEqual( + rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90 + ) + + @dist_init + def test_remote_same_worker(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + ) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4) + + @dist_init(setup_rpc=True) + def test_call_method_on_rref(self): + """ + Tests that it is possible to call an instance method on a remote object + by using rref.owner() as destination of the call. + """ + vals = [10, 2, 5, 7] + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + + # creates a remote object + rref = rpc.remote(dst_worker, MyClass, args=(vals[0],)) + + # modifies state of the remote object + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[1]), + ) + rpc.rpc_async( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[2]), + ).wait() + rpc.remote( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[3]), + ).to_here() + + # queries state of the remote object + result = rpc.rpc_sync( + dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref) + ) + + self.assertEqual(result, sum(vals)) + + # Notice `rpc.api.shutdown()` accesses + # `_delete_all_user_and_unforked_owner_rrefs` through + # `torch.distributed.rpc.api`, so patching + # `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will + # not help. 
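    # Editorial note: mock.patch.object replaces the attribute on the module
    # object through which it is looked up at call time, which is why the patch
    # below targets torch.distributed.rpc.api rather than the name re-exported on
    # torch.distributed.rpc.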
+ @mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs") + def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. + dist.barrier() + + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.ones(2, 2), 1), + ) + + import torch.distributed.rpc.api as api + + if ignore_leak: + api._ignore_rref_leak = True + rpc.shutdown(graceful=True) + else: + api._ignore_rref_leak = False + with self.assertRaisesRegex(RuntimeError, "Leaking RRef"): + rpc.shutdown(graceful=True) + + @dist_init(setup_rpc=False) + def test_rref_leak(self): + self._test_rref_leak(ignore_leak=False) + + @dist_init(setup_rpc=False) + def test_ignore_rref_leak(self): + self._test_rref_leak(ignore_leak=True) + + @dist_init + def test_rref_str(self): + rref1 = RRef(self.rank) + id_class = "GloballyUniqueId" + self.assertEqual( + f"OwnerRRef({id_class}(created_on={self.rank}, local_id=0))", rref1.__str__() + ) + + dst_rank = (self.rank + 1) % self.world_size + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + self.assertEqual( + rref2.__str__(), + "UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format( + id_class, self.rank + ), + ) + + @dist_init + def test_rref_get_future(self): + # Tests that we can obtain the future corresponding to the creation of + # the RRef on remote end + if self.rank == 0: + # Builtin + rref = rpc.remote(worker_name(1), torch.add, args=(1, 1)) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # UDF + rref = rpc.remote(worker_name(1), foo_add, args=()) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # Script + rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), )) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + + @dist_init + def test_rref_context_debug_info(self): + # This test checks local states that are modified by remote workers. + # This means that we would need barrier before and after every check. + # The barrier before the check makes sure that all previous states are + # cleared globally, the barrier after ensures that no following states + # change gets into the current check. + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # Check 1: local RRef does not update owners_ map or add a pending user. 
+ ################################################# + + rref1 = RRef(self.rank) + + # don't need a barrier here as local RRef is handled by this thread + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertIn("num_pending_users", info) + # RRef on local value is not added to context until shared across RPC + self.assertEqual(0, int(info["num_owner_rrefs"])) + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after the check 1 + dist.barrier() + + # Check 2: Sharing RRef as an arg should update owners_ map + ########################################################### + + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,)) + + # barrier before check 2 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(1, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after check 2 + dist.barrier() + + # clear states for check 2 + rpc.rpc_sync(worker_name(dst_rank), clear_global_rref) + + # Wait for owner rref to be cleared. + while int(info["num_owner_rrefs"]) != 0: + info = _rref_context_get_debug_info() + time.sleep(0.1) + dist.barrier() + + # Check 3: rpc.remote call should update owners_ map + #################################################### + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref3 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref2.to_here() + rref3.to_here() + + # barrier before check 3 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(2, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + + # barrier after check 3 + dist.barrier() + + @dist_init + def test_disable_gil_profiling(self): + # test that rpc.enable_gil_profiling(false) will result in + # GIL wait time not being recorded. + + # GIL profiling should be disabled by default. + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"]) + rpc.enable_gil_profiling(True) + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertIn("agent.gil_average_wait_time_us", info) + + @dist_init(setup_rpc=False) + def test_local_shutdown(self): + # test that we can start RPC and then immediately locally shutdown + # without sending any messages. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + # pass in graceful=False to ensure that we don't wait for other workers. + rpc.shutdown(graceful=False) + + @dist_init + def test_debug_info(self): + # only test keys in this test case. 
Values should be covered by + # individual module debug info tests + import torch.distributed.autograd as dist_autograd + + info = _get_debug_info() + rref_info = _rref_context_get_debug_info() + agent_info = rpc.api._get_current_rpc_agent().get_debug_info() + autograd_info = dist_autograd._get_debug_info() + common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys() + self.assertEqual(0, len(common_keys)) + expected = {} + expected.update(rref_info) + expected.update(agent_info) + expected.update(autograd_info) + # NB: Key ordering is only preserved in python 3.6+. So here, we + # manually check keys are equal. + for key in expected.keys(): + self.assertIn(key, info.keys()) + + for key in info.keys(): + self.assertIn(key, expected.keys()) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_handle_send_exceptions(self): + # test that if a callee node has gone down, we raise an appropriate + # exception instead of just crashing. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc._set_rpc_timeout(10) + # This barrier is needed to ensure that some workers do not exit before + # others have been brought up. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + if self.rank == 1: + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + # allow destination worker to exit without joining + error_str = self.get_shutdown_error_regex() + wait_until_node_failure(dst_rank, error_str) + fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3)) + # Shutdown sequence is not very well defined and as a result + # we can see any of the error messages defined in get_shutdown_error_regex. + with self.assertRaisesRegex(RuntimeError, error_str): + fut.wait() + # exit all workers non-gracefully. + rpc.shutdown(graceful=False) + + @dist_init + def test_deadlock(self): + # this test is copied from https://github.com/pytorch/pytorch/issues/45089 + if self.rank == 1: + dst1 = worker_name((self.rank + 1) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait() + + dist_initialized = dist.is_initialized() + if not dist_initialized: + dist.init_process_group( + backend="gloo", + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size, + ) + + @dist_init(setup_rpc=False) + def test_local_shutdown_with_rpc(self): + # test that we can start RPC, send RPCs, and then run local shutdown. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # A barrier is needed to ensure that all RPCs are processed. + # Otherwise, some RPCs can timeout since the receiving end + # has terminated. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + # pass in graceful=False to ensure that we don't wait for other workers. 
+ rpc.shutdown(graceful=False) + + @dist_init(setup_rpc=False) + def test_set_and_get_default_rpc_timeout(self): + timeout = 0.5 + + # A new `RpcBackendOptions` is constructed + # when accessing `self.rpc_backend_options`. + rpc_backend_options = self.rpc_backend_options + rpc_backend_options.rpc_timeout = timeout + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + set_timeout = rpc.get_rpc_timeout() + self.assertEqual(timeout, set_timeout) + rpc.shutdown() + + @dist_init + def test_default_timeout_used(self): + """ + Tests that if no timeout is passed into rpc_async and rpc_sync, then the + default timeout is used. + """ + dst_rank = (self.rank + 1) % self.world_size + rpc._set_rpc_timeout(0.001) # 1 ms + # futures should time out and be marked with an exception indicating it as such. + futs = [ + rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()) + for _ in range(10) + ] + expected_error = self.get_timeout_error_regex() + for fut in futs: + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # ensure that if a new timeout is set old futures don't time out but new ones do. + rpc._set_rpc_timeout(200) # 200 seconds + # create a longstanding RPC. + fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,)) + # now, set a short timeout. + rpc._set_rpc_timeout(0.001) + # fut2 should time out, fut1 should not. + fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,)) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut2.wait() + fut1.wait() + + # Zero timeout means infinity, so future should run to completion. + rpc._set_rpc_timeout(0) + rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait() + + # reset to default timeout so shutdown messages can process cleanly. + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init + def test_rpc_timeouts(self): + # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803) + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + timeout = 0.1 # 100 ms + expected_error = self.get_timeout_error_regex() + # Test async UDF + fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if there is no timeout and we use the default + # RPC timeout. + rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait() + + # Test sync UDF + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout) + + # Ensure run to completion if there is no timeout and we use the default + # RPC timeout. + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,)) + + # If we set a default timeout for RPCs, it should be respected, though + # still overridden if we pass in a different timeout to the APIs. + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,)) + + # The RPCs should run to completion since we override the timeout. 
+ rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait() + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5) + # Passing in a zero timeout should ensure that the RPC won't time out. + rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait() + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0) + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + def test_dist_init_decorator(self): + @dist_init(setup_rpc=False) + def test_func(self): + return "expected result" + + self.assertEqual(test_func(self), "expected result") + + @dist_init + def test_func(self): + return "expected result" + + self.assertEqual(test_func(self), "expected result") + + def test_use_rpc_pickler(self): + class TestPickler: + pass + + test_pickler = TestPickler() + with _use_rpc_pickler(test_pickler): + self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler) + self.assertTrue( + torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler + ) + + @dist_init + def test_wait_all(self): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) + self.assertTrue(len(_thread_local_var.future_list) == 1) + self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future)) + self.assertTrue(fut.done()) + self.assertEqual(fut.wait(), torch.ones(2, 2) + 1) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_multiple_call(self): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + for i in range(20): + fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1)) + res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1)) + self.assertEqual(res, torch.ones(i, i) + 1) + self.assertEqual(fut.wait(), torch.ones(i, i) + 1) + self.assertTrue(len(_thread_local_var.future_list) == 20) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_timeout(self): + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + timeout = 0.1 # 100 ms + fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_raise_in_user_func(self): + with self.assertRaises(ValueError): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, raise_func) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_raise_in_body(self): + with self.assertRaises(ValueError): + with _wait_all(): + raise_func() + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_custom_exception_throw_during_reconstruction(self): + """ + Test that we still throw info about the remote side exception even when + we cannot recreate it on client side. 
+ """ + initialize_pg(self.file_init_method, self.rank, self.world_size) + if self.rank != 0: + exc_caught = False + dst = worker_name(0) + try: + rpc.rpc_sync(dst, custom_raise_func, args=()) + except RuntimeError as e: + exc_caught = True + msg = str(e) + print(f"Got msg {msg}") + self.assertTrue("Original exception on remote side was" in msg) + self.assertTrue("CustomException" in msg) + except BaseException as e: + raise RuntimeError( + f"Failure - expected RuntimeError, got {e}" + ) from e + finally: + self.assertTrue(exc_caught) + + dist.barrier() + + + timed_out_rpc_event = None + + @staticmethod + def timed_out_rpc(): + RpcTest.timed_out_rpc_event.wait() + + @dist_init + def test_wait_all_exit_early_python(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, raise_func) + fut3 = rpc.rpc_async(dst, raise_func) + + # We should receive the error from fut2 + with self.assertRaisesRegex(ValueError, expected_err): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + @dist_init + def test_wait_all_exit_early_builtin(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5))) + fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5))) + + # We should receive the error from fut2 + with self.assertRaisesRegex(RuntimeError, "size of tensor"): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + @dist_init + def test_wait_all_exit_early_script_function(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,)) + fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,)) + + # We should receive the error from fut2 + with self.assertRaisesRegex(RuntimeError, expected_err): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + + @dist_init + def test_function_not_on_callee(self): + # test that if a function does not exist on a callee, we don't crash, + # instead we get an AttributeError indicating that the func does not exist. + this_module = sys.modules[__name__] + caller_worker = "worker0" + callee_worker = "worker1" + + if self.rank == 1: + # Use delattr to remove the binding of a func on this nodes + delattr(this_module, "foo_add") + # notify remote end that we have removed it. + rpc.rpc_sync(caller_worker, set_value, args=(self.rank,)) + + if self.rank == 0: + # func exists on caller, but not callee. + # wait for remote end to remove the binding of foo_add func. 
+ wait_for_value_future() + # Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error. + self.assertTrue(hasattr(this_module, "foo_add")) + with self.assertRaisesRegex( + RuntimeError, "RPC pickler does not serialize" + ): + rpc.rpc_sync(callee_worker, foo_add, args=()) + + @dist_init + def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + a = MyClass(1) + b = MyClass(2) + + # This is to make Python not garbage collect a and b. + a.other = b + b.other = a + + n = self.rank + a.rref = rpc.remote( + dst_worker_name, + torch.add, + args=(torch.ones(n, n), 2) + ) + + @dist_init(setup_rpc=False) + def test_use_rref_after_shutdown(self): + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # pass in graceful=True to ensure that local UserRRefs are deleted. + rpc.shutdown(graceful=True) + + with self.assertRaisesRegex( + RuntimeError, "Cannot call to_here\\(\\) on it after deletion." + ): + rref.to_here() + + with self.assertRaisesRegex( + RuntimeError, "Cannot call fork an UserRRef after deletion." + ): + import torch.distributed.rpc.internal as internal + internal.serialize(rref) + + @staticmethod + def _return_gpu_tensor(): + return torch.rand(3, 3).cuda(0) + + @staticmethod + def _return_gpu_tensor_list(): + return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)] + + @staticmethod + def _gpu_tensor_list_arg(tensor_list): + return torch.rand(3, 3) + + def _create_rref(self): + owner_rank = (self.rank + 2) % self.world_size + return rpc.remote( + worker_name(owner_rank), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + + @dist_init + def test_user_rrefs_confirmed(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret = rpc.rpc_sync( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret, True) + + @dist_init + def test_user_rrefs_confirmed_remote(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret_rref = rpc.remote( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret_rref.to_here(), True) + + @dist_init + def test_rref_py_pickle_not_supported(self): + local_rref = RRef(35) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"): + torch.save(local_rref, fname) + + @dist_init + def test_remote_throw(self): + rref = rpc.remote(worker_name((self.rank + 1) % self.world_size), + raise_or_inc, + args=(torch.ones(2),)) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + rref.to_here() + + @dist_init + def test_non_cont_tensors(self): + if self.rank == 0: + # Create a non-contiguous tensor. + t = torch.rand(5, 5) + t_view = t.narrow(1, 2, 2) + self.assertFalse(t_view.is_contiguous()) + t_cont = t_view.contiguous() + self.assertTrue(t_cont.is_contiguous()) + self.assertEqual(t_view, t_cont) + + # Send non-cont tensor over RPC. + next_rank = (self.rank + 1) % self.world_size + t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont)) + + # Verify the returned tensor. 
+ self.assertEqual(t_view, t_ret) + self.assertFalse(t_ret.is_contiguous()) + + @dist_init + def test_callback_simple(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + def callback(fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + set_by_cb.set_result(ret.clone() + 1) + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_wrong_arg_num(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_fut = fut.then(my_function) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + with self.assertRaisesRegex( + RuntimeError, + "my\\_function\\(\\) missing 2 required positional arguments" + ): + cb_fut.wait() + + @dist_init + def test_callback_wrong_arg_type(self): + dst = worker_name((self.rank + 1) % self.world_size) + + fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1)) + fut1 = fut0.then(lambda x: x + 1) + + with self.assertRaisesRegex( + RuntimeError, + "unsupported operand type\\(s\\) for \\+" + ): + fut1.wait() + + @dist_init + def test_callback_multi(self): + num_cbs = 10 + n = self.rank + 1 + + def callback(idx, fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + return ret + idx + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_futs = [] + for idx in range(num_cbs): + cb_futs.append(fut.then(partial(callback, idx))) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + for idx in range(num_cbs): + self.assertEqual( + cb_futs[idx].wait(), + torch.ones(n, n) * 2 + idx + ) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_chain(self): + n = self.rank + 1 + dst = worker_name(n % self.world_size) + + def callback(fut): + return fut.wait() + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), 1) + ) + + num_cbs = 20 + for _ in range(num_cbs): + fut = fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs) + + @dist_init + def test_callback_in_rpc(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + add_use_future_cb, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_callback_with_ret(self): + dst = worker_name((self.rank + 1) % self.world_size) + + def callback(fut0): + fut2 = rpc.rpc_async( + dst, + torch.add, + args=(fut0.wait(), 1) + ).then(lambda fut1: fut1.wait() + 1) + + return fut2.wait() + + fut3 = rpc.rpc_async( + dst, + torch.add, + args=(torch.ones(2, 2), 1) + ).then(callback) + + self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3) + + @dist_init + def test_callback_with_error(self): + dst = worker_name((self.rank + 1) % self.world_size) + + def callback(fut0): + with self.assertRaisesRegex(ValueError, "Expected error"): + fut0.wait() + raise RuntimeError("Another expected error") + + fut1 = rpc.rpc_async(dst, raise_func).then(callback) + with self.assertRaisesRegex(RuntimeError, "Another expected error"): 
+ fut1.wait() + + @dist_init + def test_callback_none(self): + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + TypeError, + "incompatible function arguments." + ): + rpc.rpc_async(dst, raise_func).then(None) + + @dist_init + def test_add_done_callback(self): + set_by_cb = False + n = self.rank + 1 + + def callback(fut): + nonlocal set_by_cb + fut.wait() + set_by_cb = True + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + fut.add_done_callback(callback) + fut_then = fut.then(lambda _: True) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + # We have no guarantee that the add_done_callback fn will execute before the test finishes. + # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback + fut_then.wait() + self.assertTrue(set_by_cb) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_mark_future_twice(self): + fut = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1) + with self.assertRaisesRegex( + RuntimeError, + "Future can only be marked completed once" + ): + fut.set_result(1) + + @dist_init + def test_pickle_future(self): + fut = torch.futures.Future() + errMsg = "Can not pickle torch.futures.Future" + + dst = worker_name((self.rank + 1) % self.world_size) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_sync(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_async(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.remote(dst, fail_on_fut, args=(fut,)) + + @dist_init + def test_future_done(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1)) + fut.wait() + self.assertTrue(fut.done()) + + @dist_init + def test_future_done_exception(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, raise_func) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + self.assertTrue(fut.done()) + + def _test_future_cb(self, func): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + func, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_future_in_rpc(self): + self._test_future_cb(add_use_future_set_result) + + @dist_init + def test_future_nested_callback(self): + self._test_future_cb(add_use_future_nested_cb) + + def _test_async_function_raise(self, mode): + with self.assertRaisesRegex(RuntimeError, "Expected error"): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_raise_func, + mode + ) + + @dist_init + def test_async_function_raise(self): + self._test_async_function_raise(RPCExecMode.SYNC) + + @dist_init + def test_async_function_raise_async(self): + self._test_async_function_raise(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_raise_remote(self): + self._test_async_function_raise(RPCExecMode.REMOTE) + + def _test_async_function_wrong_return_type(self, mode): + errMsg = ( + "Functions decorated with @rpc\\.async_function must return a " + "torch\\.futures\\.Future 
object," + ) + with self.assertRaisesRegex(RuntimeError, errMsg): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_wrong_type, + mode + ) + + @dist_init + def test_async_function_wrong_return_type(self): + self._test_async_function_wrong_return_type(RPCExecMode.SYNC) + + @dist_init + def test_async_function_wrong_return_type_async(self): + self._test_async_function_wrong_return_type(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_wrong_return_type_remote(self): + self._test_async_function_wrong_return_type(RPCExecMode.REMOTE) + + @dist_init + def test_async_function_simple(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + def _test_async_function(self, fn, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + args = (dst2, torch.ones(2, 2), 1, 2) + ret = self._run_func_in_mode(dst1, fn, mode, args=args) + self.assertEqual(ret, torch.ones(2, 2) + 3) + + @dist_init + def test_async_function_with_future_ctor(self): + self._test_async_function(async_add_with_future_ctor) + + @dist_init + def test_async_function_with_future_ctor_remote(self): + self._test_async_function( + async_add_with_future_ctor, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_function_chained(self): + self._test_async_function(async_add_chained) + + @dist_init + def test_async_function_chained_remote(self): + self._test_async_function(async_add_chained, RPCExecMode.REMOTE) + + @dist_init + def test_async_function_nested(self): + self._test_async_function(async_add_nested) + + @dist_init + def test_async_function_nested_remote(self): + self._test_async_function(async_add_nested, RPCExecMode.REMOTE) + + @dist_init + def test_async_static_method(self): + self._test_async_function(AsyncExecutionClass.static_async_add) + + @dist_init + def test_async_static_method_remote(self): + self._test_async_function( + AsyncExecutionClass.static_async_add, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_class_method(self): + self._test_async_function(AsyncExecutionClass.class_async_add) + + @dist_init + def test_async_class_method_remote(self): + self._test_async_function( + AsyncExecutionClass.class_async_add, + RPCExecMode.REMOTE + ) + + def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + rref = rpc.remote(dst1, AsyncExecutionClass) + + x = torch.ones(2, 2) + y = torch.ones(2, 2) + 1 + if mode == RPCExecMode.SYNC: + ret = rref.rpc_sync().static_async_add(dst2, x, x, y) + ret += rref.rpc_sync().class_async_add(dst2, x, x, y) + ret += rref.rpc_sync().bound_async_add(dst2, x, x, y) + elif mode == RPCExecMode.ASYNC: + ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait() + ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait() + ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait() + elif mode == RPCExecMode.REMOTE: + ret = rref.remote().static_async_add(dst2, x, x, y).to_here() + ret += rref.remote().class_async_add(dst2, x, x, y).to_here() + ret += rref.remote().bound_async_add(dst2, x, x, y).to_here() + + self.assertEqual(ret, 3 * 4 * x) + + @dist_init + def test_async_class_rref_proxy(self): + self._test_test_async_class_rref_proxy() + + @dist_init 
+ def test_async_class_rref_proxy_async(self): + self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC) + + @dist_init + def test_async_class_rref_proxy_remote(self): + self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE) + + def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + num = 20 + step = 3 + args = (dst2, torch.ones(2, 2), num, step) + ret = self._run_func_in_mode(dst1, fn, mode, args=args) + self.assertEqual(ret, torch.ones(2, 2) + num * step) + + @dist_init + def test_async_function_multi_chained(self): + self._test_async_function_multi(async_add_chained_multi) + + @dist_init + def test_async_function_multi_chained_async(self): + self._test_async_function_multi( + async_add_chained_multi, + RPCExecMode.ASYNC + ) + + @dist_init + def test_async_function_multi_chained_remote(self): + self._test_async_function_multi( + async_add_chained_multi, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_function_multi_fanout(self): + self._test_async_function_multi(async_add_multi_fanout) + + @dist_init + def test_async_function_multi_fanout_async(self): + self._test_async_function_multi( + async_add_multi_fanout, + RPCExecMode.ASYNC + ) + + @dist_init + def test_async_function_multi_fanout_remote(self): + self._test_async_function_multi( + async_add_multi_fanout, + RPCExecMode.REMOTE + ) + + def _test_return_future(self, mode): + with self.assertRaisesRegex( + RuntimeError, + "Can not pickle torch.futures.Future" + ): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + return_future, + mode + ) + + @dist_init + def test_return_future(self): + self._test_return_future(RPCExecMode.SYNC) + + @dist_init + def test_return_future_async(self): + self._test_return_future(RPCExecMode.ASYNC) + + @dist_init + def test_return_future_remote(self): + self._test_return_future(RPCExecMode.REMOTE) + + @dist_init + def test_rref_timeout(self): + # This test is similar to ones in FaultyProcessGroupTest, but is meant to be + # run with other backends besides ProcessGroup. + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + wait_until_owners_and_forks_on_rank(1, 1, rank=1) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_pg_then_rpc(self): + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + # Test RPC. 
+ next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_rpc_then_pg(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + # Test RPC. + next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init + def test_wait_all_with_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init + def test_wait_all_with_partial_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1))) + + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491", + ) + def test_init_rpc_twice(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Wait for all init to complete. + dist.barrier() + + # Use a different file name for the next initialization + new_backend_options = self.rpc_backend_options + new_backend_options.init_method += "init_2" + + # Ensure rpc initialization works again. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=new_backend_options, + ) + + # Verify RPCs work after re-init. 
+ dst = worker_name((self.rank + 1) % self.world_size) + rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + rpc.rpc_sync(dst, foo_add, args=()) + + rpc.shutdown() + + def test_wrong_types(self): + with self.assertRaisesRegex( + TypeError, + "Argument backend must be a member of BackendType", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend="TENSORPIPE", + ) + + with self.assertRaisesRegex( + TypeError, + "Argument rpc_backend_options must be an instance of RpcBackendOptions", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=self.rpc_backend, + rpc_backend_options={"init_method": self.init_method} + ) + + def test_cannot_infer_backend_from_options(self): + # An exception should be raised if the backend isn't specified but + # options are given which are not an instance of any of the known + # agents' option classes. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex(TypeError, "Could not infer backend for options"): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. + rpc_backend_options=rpc_backend_options, + ) + + @dist_init + def test_owner_rref_backward(self): + dst = worker_name((self.rank + 1) % self.world_size) + t1 = torch.rand(10, 10, requires_grad=True) + rref = rpc.RRef(t1.sum() + t1.sum()) + rref.backward() + expected_grad = torch.ones_like(t1) * 2 + self.assertEqual(expected_grad, t1.grad) + + with dist_autograd.context() as context_id: + t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1)) + rref = rpc.RRef(t2.sum()) + rref.backward(context_id) + self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1]) + + # Double backward. + with dist_autograd.context() as context_id: + t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1)) + rref = rpc.RRef(t2.sum()) + rref.backward(context_id, retain_graph=True) + rref.backward(context_id) + self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1]) + + # Test errors. 
+ with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"): + rpc.RRef(torch.rand(10)).backward() + + with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"): + rpc.RRef(torch.rand(10, requires_grad=True)).backward() + + with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"): + rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100) + + with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"): + rpc.RRef("foo").backward() + + @staticmethod + def _sum(x): + return x.sum() + + @staticmethod + def _identity(x): + return x + + @dist_init + def test_user_rref_backward(self): + dst = worker_name((self.rank + 1) % self.world_size) + t = torch.rand(10, requires_grad=True) + with dist_autograd.context() as context_id: + rref = rpc.remote(dst, RpcTest._sum, args=(t,)) + rref.backward(context_id, retain_graph=True) + rref.backward(context_id) + self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t]) + + with dist_autograd.context() as context_id: + rref = rpc.remote(dst, RpcTest._identity, args=("foo",)) + with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"): + rref.backward(context_id) + + with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"): + rref.backward() + + @dist_init(setup_rpc=False) + def test_shutdown_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + if self.rank != 0: + og_func = rpc.api._broadcast_to_followers + og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs + + # Monkey-patch _broadcast_to_followers to fail, which would ensure + # _all_gather on leader raises an exception. + def raise_error(sequence_id, objects_map): + og_func(sequence_id, objects_map) + raise RuntimeError('simulation') + + # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail, + # which would ensure barrier is not called on followers. 
+ def rref_error(): + raise RuntimeError('simulation rref') + + try: + rpc.api._broadcast_to_followers = raise_error + rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error + with self.assertRaisesRegex(RuntimeError, 'simulation rref'): + rpc.shutdown() + finally: + rpc.api._broadcast_to_followers = og_func + rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func + else: + with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'): + rpc.shutdown() + + dist.barrier() + + @dist_init + def test_my_parameter_server(self): + self._my_parameter_server(False) + + +class CudaRpcTest(RpcAgentTestFixture): + + @skip_if_lt_x_gpu(2) + @dist_init + def test_profiler_remote_cuda(self): + if self.rank != 1: + return + + dst_cuda_0 = (self.rank + 1) % self.world_size + dst_cuda_1 = (self.rank + 2) % self.world_size + dst_worker_cuda_0 = worker_name(dst_cuda_0) + dst_worker_cuda_1 = worker_name(dst_cuda_1) + + with _profile(use_cuda=True) as p: + fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, )) + fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, )) + fut1.wait() + fut2.wait() + + def get_name(event): + return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):] + + function_events = p.function_events + for event in function_events: + if event.is_async: + self.assertEqual(0, event.cuda_time_total) + self.assertEqual([], event.kernels) + self.assertEqual(0, event.cuda_time) + else: + if event.node_id == 1: + continue + self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1]) + if get_name(event) in EXPECTED_REMOTE_EVENTS: + self.assertGreater(event.cuda_time_total, 0) + self.assertEqual(1, len(event.kernels)) + kernel = event.kernels[0] + if event.node_id == dst_cuda_0: + self.assertEqual(kernel.device, 0) + if event.node_id == dst_cuda_1: + self.assertEqual(kernel.device, 1) + self.assertGreater(event.cuda_time, 0) + + # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled + # events. + remote_events = [event for event in function_events if event.is_remote] + remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS] + self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS)) + + +class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def test_mismatched_type_for_options(self): + # An exception should be raised if the options are not an instance of + # TensorPipeRpcBackendOptions. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex( + TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`" + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=rpc.BackendType.TENSORPIPE, + rpc_backend_options=rpc_backend_options, + ) + + def test_infer_backend_from_options(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.init_method, + _transports=tp_transports() + ) + + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. + rpc_backend_options=rpc_backend_options, + ) + + self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent) + + # FIXME Merge this test with the corresponding one in RpcTest. 
+ @dist_init(setup_rpc=False) + def test_set_and_get_num_worker_threads(self): + NUM_THREADS = 27 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=NUM_THREADS, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_tensorpipe_set_default_timeout(self): + # Set a high timeout since it doesn't affect test runtime and ensures + # the test doesn't erroneously timeout due to slow machines. + timeout = 100 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + default_timeout = rpc.get_rpc_timeout() + self.assertEqual(default_timeout, timeout) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_tensorpipe_options_throw_on_timedelta_timeout(self): + from datetime import timedelta + + timeout = timedelta() + # Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails + with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + ) + + @dist_init + def _test_rref_get_type_timeout(self, blocking): + # Test where we try to get the type of a RRef from an owner, but RRef + # creation is slower than timeout passed into _get_type. + dst_rank = (self.rank + 1) % self.world_size + dst = worker_name(dst_rank) + slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True)) + timeout = 0.5 + expected_err = self.get_timeout_error_regex() + # Blocking: blocks on inline call + if blocking: + with self.assertRaisesRegex(RuntimeError, expected_err): + slow_rref._get_type(timeout=timeout, blocking=blocking) + # Non-blocking: blocks on wait + else: + fut = slow_rref._get_type(timeout=timeout, blocking=blocking) + with self.assertRaisesRegex(RuntimeError, expected_err): + fut.wait() + + # FIXME We wait until the remote completed creating the OwnerRRef + # because there's currently a race if we shut down RPC before that. 
+ slow_rref.to_here() + + def test_rref_get_type_timeout_blocking(self): + self._test_rref_get_type_timeout(blocking=True) + + def test_rref_get_type_timeout_non_blocking(self): + self._test_rref_get_type_timeout(blocking=False) + + @dist_init + def test_op_with_invalid_args(self): + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + RuntimeError, "Overloaded torch operator invoked from Python failed to match any schema" + ): + rpc.rpc_sync(dst, torch.add, args=()) + + def _test_rref_proxy_timeout(self, rref_proxy_api): + dst_rank = (self.rank + 1) % self.world_size + dst = worker_name(dst_rank) + rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), )) + # Ensure RRef is created on remote node. + rref.to_here() + rref_api = getattr(rref, rref_proxy_api) + self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}") + expected_error = self.get_timeout_error_regex() + timeout = 2 + with self.assertRaisesRegex(RuntimeError, expected_error): + result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2)) + if rref_api == rref.rpc_async: + result.wait() + elif rref_api == rref.remote: + result._get_future().wait() + + # Case where rpc.remote() is stuck and exceeds timeout + slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True)) + timeout = 0.01 + rref_api = getattr(slow_rref, rref_proxy_api) + # Note that even when we call rref.rpc_async() in this case, we + # time out in future creation, not waiting for future. This is because + # rref proxy function calls rref._get_type before returning future, + # which blocks on the RRef being created on owner node, until the + # specified timeout. + with self.assertRaisesRegex(RuntimeError, expected_error): + result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2)) + # rpc_async returns immediately and surface a timeout through wait() + if rref_api == slow_rref.rpc_async: + result.wait() + + # FIXME We wait until the remote completed creating the OwnerRRef + # because there's currently a race if we shut down RPC before that. 
+ slow_rref.to_here() + + @dist_init + def test_rref_proxy_timeout(self): + for rpc_api in ["rpc_sync", "rpc_async", "remote"]: + self._test_rref_proxy_timeout(rpc_api) + + @dist_init + def test_send_to_rank_sparse(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test sparse tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor() + y = build_sparse_tensor() + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor(coalesce=True) + y = build_sparse_tensor(coalesce=True) + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + @dist_init + def test_self_py_udf_remote_sparse(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg_sparse(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_remote_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg_sparse(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + def test_world_size_one_sparse(self): + self._world_size_one( + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_multi_rpc_sparse(self): + self._multi_rpc(True) + + def test_wait_all_workers_sparse(self): + self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor()) + + def test_wait_all_workers_twice_sparse(self): + self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor()) + + @dist_init + def test_py_sparse_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [build_sparse_tensor(), build_sparse_tensor()] + ret = rpc.rpc_sync( + worker_name(dst_rank), my_container_sum, args=(a,) + ) + self.assertEqual(ret, my_container_sum(a)) + + @dist_init + def test_nested_rpc_sparse(self): + self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2) + + @dist_init + def test_stress_heavy_rpc_sparse(self): + self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),)) + + @dist_init + def test_builtin_remote_ret_sparse(self): + self._builtin_remote_ret( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_builtin_remote_self_sparse(self): + self._builtin_remote_self( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_multi_builtin_remote_ret_sparse(self): + self._test_multi_remote_call( + torch.add, True, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_multi_py_udf_remote_sparse(self): + 
self._test_multi_remote_call( + my_function, + True, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args_sparse(self): + self._py_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 4 + ) + + @dist_init + def test_py_rref_args_user_share_sparse(self): + self._py_rref_args_user_share( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_py_rpc_rref_args_sparse(self): + self._py_rpc_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_nested_remote_sparse(self): + self._nested_remote( + nested_remote_sparse, + build_sparse_tensor() + build_sparse_tensor() + ) + + @dist_init + def test_nested_rref_sparse(self): + self._nested_rref( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_nested_rref_stress_sparse(self): + self._nested_rref_stress( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_my_parameter_server_sparse(self): + self._my_parameter_server(True) + + # Test init_rpc without world_size argument + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Dynamic RPC new ranks communicate with existing ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_new_rank_can_communicated_with_existing_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + result = rpc.rpc_sync(worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + if self.rank == 0: + for i in range(1, self.world_size): + 
result = rpc.rpc_sync(worker_name(i), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks using CUDA rpc + @skip_if_lt_x_gpu(2) + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank_cuda(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + options = self.rpc_backend_options + for i in range(1, self.world_size): + dst = worker_name(i) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 1}) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # TODO: Cuda RPC is failing due to: + # terminate called after throwing an instance of 'c10::Error' + # what(): 0 <= device && static_cast(device) < device_allocator.size() + # INTERNAL ASSERT FAILED at "../c10/cuda/CUDACachingAllocator.cpp":1937, + # please report a bug to PyTorch. Allocator not initialized for device 1: did you call init? + # dist.barrier() + # if self.rank == 0: + # for i in range(1, self.world_size): + # x = torch.ones(2) + # result_on_device_0 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(0), 1)) + # result_on_device_1 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(1), 1)) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_0) + # self.assertEqual(torch.device('cuda:0'), result_on_device_0.device) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_1) + # self.assertEqual(torch.device('cuda:1'), result_on_device_1.device) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc_without_rank(self): + # default initialization uses file init + with self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=self.rpc_backend_options, + ) + + # env init + with self.assertRaisesRegex(ValueError, "environment variable RANK expected"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + # tcp init + with self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:23456") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_dynamic_and_static_init_rpc_together(self): + # Initialize a static rpc group with size = self.world_size - 1 + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + world_size_minus_one = self.world_size - 1 + if self.rank < world_size_minus_one: + rpc.init_rpc( + 
name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=world_size_minus_one, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + + # Attempt to add an additional dynamic group member + if self.rank == world_size_minus_one: + # Expect error message to be thrown + with self.assertRaisesRegex(RuntimeError, "RPC group mixes statically and dynamically\ + initialized members which is not supported."): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + +class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def _test_device_maps(self, options, errMsg): + with self.assertRaisesRegex(ValueError, errMsg): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + self.assertFalse(rpc.api._is_current_rpc_agent_set()) + + @skip_if_lt_x_gpu(2) + def test_device_maps_wrong_worker_name(self): + options = self.rpc_backend_options + options.set_device_map("none_exist", {0: 1}) + + self._test_device_maps( + options, + errMsg="Node worker0 has invalid target node names in its device maps" + ) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_max_local_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {torch.cuda.device_count(): 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has source devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_max_remote_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: torch.cuda.device_count()}) + + self._test_device_maps( + options, + errMsg="Node worker0 has target devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_many_to_one(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has duplicated target devices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_one_to_many(self): + if self.rank == 0: + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1}) + with self.assertRaisesRegex( + ValueError, "`set_device_map` only supports 1-to-1 mapping" + ): + options.set_device_map(dst, {0: 0}) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_min_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {-1: 0}) + + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {0: -1}) + + @staticmethod + def _gpu_add(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]): + return (x + y).to(0) + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_gpu(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1, 1: 0}) + + rpc.init_rpc( + 
name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add, + args=(torch.zeros(2).to(0), torch.ones(2).to(0)) + ) + self.assertEqual(ret.device, torch.device(1)) + self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1)) + rpc.shutdown() + + @staticmethod + def _gpu_add_given_devices(x, y, x_to, y_to, z_to): + x_device = "cpu" if x.device.type == "cpu" else x.device.index + y_device = "cpu" if y.device.type == "cpu" else y.device.index + if x_device == x_to and y_device == y_to: + return x.to(z_to) + y.to(z_to) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None): + fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn + x_to = device_map[x_from] + y_to = device_map[y_from] + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(x_from) + y = torch.ones(2).to(y_from) + + ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to)) + + reverse_device_map = {device_map[k] : k for k in device_map} + z_from = reverse_device_map[z_to] + + ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index + self.assertEqual(ret_device, z_from) + self.assertEqual(ret, torch.ones(2).to(z_from)) + + rpc.shutdown() + + def test_device_map_cpu(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to="cpu", + device_map={"cpu" : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_cpu_to_gpu_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=0, + device_map={"cpu" : 0}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_cpu_to_gpu_non_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=1, + device_map={"cpu" : 1}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_gpu_to_cpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to="cpu", + device_map={0 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_to_cpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to="cpu", + device_map={1 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=0, + device_map={0 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=1, + device_map={1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default_to_non_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=1, + device_map={0 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default_to_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=0, + device_map={1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_1(self): + self._test_device_maps_gpu( 
+ x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_2(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_1(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_2(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @staticmethod + def _gpu_add_multi_gpu(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]): + return x.to(0) + y, x - y.to(1) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_multi_gpu(self, dst): + options = self.rpc_backend_options + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(1) + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(x, y) + ) + + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, 
torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_multi_gpu(dst) + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_multi_gpu(dst) + + @staticmethod + def _gpu_add_return_to_gpu(x, y): + if x.device.type == 'cpu' and y.device.type == 'cpu': + return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3) + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_in_options(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc.TensorPipeRpcBackendOptions( + init_method=options.init_method, + num_worker_threads=options.num_worker_threads, + device_maps={dst: {0: 1, 1: 0}}, + _transports=tp_transports() + ) + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(torch.zeros(2).to(0), torch.ones(2).to(1)) + ) + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + def _test_device_maps_return_to_gpu(self, dst): + options = self.rpc_backend_options + + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 2}) + options.set_device_map(dst, {2: 3}) + options.set_device_map(dst, {3: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu, + args=(torch.zeros(2), torch.ones(2)) + ) + for i in range(len(rets)): + self.assertEqual(rets[i].device, torch.device((3 + i) % 4)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1)) + self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2)) + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_return_to_gpu(dst) + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_return_to_gpu(dst) + + @staticmethod + def _add_to_gpu(x, y): + return (x + y).to(0) + + def _test_device_maps_missing_config(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = ( + "TensorPipe RPC backend only supports CPU tensors by default.*" + "`set_device_map` on `TensorPipeRpcBackendOptions`" + ) + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1)) + elif mode == RPCExecMode.REMOTE: + rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + 
self.assertEqual(ret, torch.ones(2) + 1) + + def _test_device_maps_missing_config_response(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = "Response device mapping is not available" + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + elif mode == RPCExecMode.REMOTE: + rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + self.assertEqual(ret, torch.ones(2) + 1) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config(self): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + def test_device_maps_missing_config_not_timeout(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options + ) + + timeout = rpc.get_rpc_timeout() + + tik = time.time() + self._test_device_maps_missing_config(RPCExecMode.SYNC) + rpc.shutdown() + tok = time.time() + + self.assertTrue(tok - tik < timeout) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote(self): + self._test_device_maps_missing_config(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(2) + def test_device_maps_remote(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + + self.assertEqual(rref.to_here().device.index, 1) + self.assertEqual(rref.to_here(), torch.ones(2).to(1)) + + rpc.shutdown() + + @staticmethod + def _slow_add_on_user_stream(x, y): + s0 = torch.cuda.current_stream(x.device) + s1 = torch.cuda.Stream(device=x.device) + s1.wait_stream(s0) + x.record_stream(s1) + y.record_stream(s1) + with torch.cuda.stream(s1): + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + z = x + y + s0.wait_stream(s1) + z.record_stream(s0) + return z + + def _test_custom_stream(self, fn, device_map): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + 
world_size=self.world_size, + rpc_backend_options=options, + ) + + fn(dst) + + rpc.shutdown() + + def _test_stream_sync(self, dst): + x = torch.ones(2, 2).to(0) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + self.assertEqual(ret, 2 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream(self): + self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"}) + + def _test_stream_multi_async(self, dst): + futs = [] + for i in range(20): + x = torch.ones(2, 2).to(0) * i + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + ) + + for i in range(20): + self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_multi(self): + self._test_custom_stream( + self._test_stream_multi_async, + {"cuda:0": "cuda:1"} + ) + + @staticmethod + def _nested_slow_add_on_user_stream(dst, x, y, z): + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, y) + ) + + return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z) + + def _test_stream_nested_sync(self, dst): + x = torch.ones(2, 2).to(0) + y = torch.ones(2, 2).to(0) * 2 + z = torch.ones(2, 2).to(0) * 3 + nested_dst = worker_name((self.rank + 2) % self.world_size) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + self.assertEqual(ret, 6 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested(self): + self._test_custom_stream( + self._test_stream_nested_sync, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + def _test_stream_nested_multi_async(self, dst): + if self.rank == 0: + futs = [] + n = 5 + xs, ys, zs = [], [], [] + for i in range(n): + x = torch.ones(2, 2).to(0) * (i - 1) + y = torch.ones(2, 2).to(0) * i + z = torch.ones(2, 2).to(0) * (i + 1) + xs.append(x) + ys.append(y) + zs.append(z) + nested_dst = worker_name((self.rank + 2) % self.world_size) + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + ) + + for i in range(n): + self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i]) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested_multi(self): + self._test_custom_stream( + self._test_stream_nested_multi_async, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + @staticmethod + def _gpu_add_wrong_gpus(x, y): + if x.is_cuda and y.is_cuda: + return x.cpu() + y.cuda() + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(1) + def test_device_mismatch(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(0) + + with self.assertRaisesRegex( + RuntimeError, + "Expected all tensors to be on the same device, but found at least two devices" + ): + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus, + args=(x, y) + ) + + rpc.shutdown() + + def _test_rref_synchronization(self, local_device, remote_device): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device : remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + 
rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. + # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + x = torch.randn(200, 1, 28, 28).to(local_device) + actual = rref.remote().forward(x).to_here() + expected = rref.rpc_sync().forward(x) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_to_here_synchronization1(self): + self._test_rref_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization2(self): + self._test_rref_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization3(self): + self._test_rref_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization4(self): + self._test_rref_synchronization("cuda:0", "cuda:1") + + def _test_rref_as_arg_synchronization( + self, + local_device, + remote_device, + devicesOptions=None + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device: remote_device}) + + input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size) + options.set_device_map(input_src, {remote_device: local_device}) + + if devicesOptions is not None: + options.set_devices(devicesOptions[self.rank]) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. 
+ # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + actual = rref.remote().forward(rref_x, True).to_here() + expected = rref.rpc_sync().forward(rref_x, True) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization1(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization2(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization3(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization4(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization5(self): + self._test_rref_as_arg_synchronization( + "cuda:0", + "cuda:0", + [["cuda:0"] for _ in range(4)], # devicesOptions + ) + + @staticmethod + def _rref_relay(rref): + return rref.to_here() + + def _test_rref_forward_synchronization(self, local_device, remote_device): + options = self.rpc_backend_options + + input_src = worker_name(0) + model_dst = worker_name(1) + out_relay = worker_name(2) + + if self.rank == 0: + # for 1) model construction 2) forward execution + options.set_device_map(model_dst, {local_device: remote_device}) + + # Forward output will be first copied to the relay node before + # returning to the worker. This is intentional, to test RRef + # forward CUDA stream synchronizations. + options.set_device_map(out_relay, {local_device: local_device}) + elif self.rank == 1: + # worker1 hosts the model and runs forward. The forward functions + # calls RRef.to_here(), hence needs to configure the device map + options.set_device_map(input_src, {remote_device: local_device}) + elif self.rank == 2: + # worker2 will get the out RRef and call to_here() and hence, needs + # to configure device map. + options.set_device_map(model_dst, {local_device: remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. 
+ # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + rref_out = rref.remote().forward(rref_input, True) + out = rpc.remote( + out_relay, + TensorPipeAgentCudaRpcTest._rref_relay, + args=(rref_out,) + ).to_here() + expected = rref.rpc_sync().forward(rref_input, True) + self.assertEqual(out, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_forward_synchronization1(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization2(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization3(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization4(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:1") + + def _test_owner_rref_forward_synchronization(self, local_device, remote_device): + if self.rank == 0: + options = self.rpc_backend_options + options.set_device_map("w0", {local_device: remote_device}) + rpc.init_rpc( + "w0", + rank=0, + world_size=1, + rpc_backend_options=options + ) + + model = rpc.remote( + "w0", torch.nn.Linear, (2048, 20000) + ).remote().to(remote_device) + for _ in range(30): + data = torch.rand(2048, 2048).to(local_device) + output = model.rpc_sync().forward(data) + # to_here() internally calls localValue as the caller is + # the owner of the RRef. + v0 = rpc.RRef(output).remote().sum().to_here().item() + v1 = output.sum().item() + self.assertEqual(v0, v1) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_owner_rref_forward_synchronization1(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization2(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization3(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization4(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1") + + @staticmethod + def _return_tensor_view(i): + x = torch.ones(1000, 200).cuda(0) * i + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + # serialization of the return value will create a new tensor from the + # view, which is done outside of the user function. 
+ return x.split(100)[0] + + @skip_if_lt_x_gpu(1) + def test_tensor_view_as_return_value(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + futs = [] + for i in range(5): + futs.append(rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._return_tensor_view, + args=(i,) + )) + + for i in range(5): + self.assertEqual(torch.ones(100, 200) * i, futs[i].wait()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected source devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + options.set_devices([1]) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch_reverse(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected target devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + + options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + device_maps={dst: {0 : 1}}, + devices=[0] + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_int(self): + fut = Future(devices=[0]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_str(self): + fut = Future(devices=["cuda:0"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_device(self): + fut = Future(devices=[torch.device("cuda", 0)]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_not_cuda(self): + with self.assertRaisesRegex( + ValueError, "Expected devices to have indices, got cpu" + ): + fut = Future(devices=["cpu"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(2) + def test_cuda_future_callback_changes_devices(self): + # We check proper CUDA stream synchronization by filling the tensor with + # the expected value in one stream, and reading it from another stream. 
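+ # If the CUDA-aware Future did not make wait() synchronize the reading stream + # with the streams that produced and copied the value, the check at the end + # could observe the initial zeros instead of ones.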
+ tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:0", "cuda:1"]) + + def cb(fut): + t0 = fut.value() + tensor1.copy_(t0, non_blocking=True) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(2) + def test_cuda_future_value_on_bad_device(self): + tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:1"]) + + # As a plus, we test that futures still invoke callbacks even in case of + # error, and that the child futures are successful if those callbacks + # don't access the parent future. + def cb(fut): + with torch.cuda.device("cuda:1"): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor1.fill_(1) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with self.assertRaisesRegex( + ValueError, + r"The result contained tensors residing on device\(s\) cuda:0 " + r"which are not among the expected device\(s\) cuda:1", + ): + parent_future.wait() + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(1) + def test_async_execution_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t = torch.zeros((100,), device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 1).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_async_execution_nested_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + nested_dst = worker_name((self.rank + 2) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + a = torch.ones((100,), device="cuda:0") + b = torch.ones((100,), device="cuda:0") + c = torch.ones((100,), device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 3).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_modify_tensor_inplace(self): + tensor = torch.zeros((100,), device="cuda:0") + future = Future(devices=["cuda:0"]) + future.set_result(tensor) + # It's weird to modify the 
value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + tensor.fill_(1) + future.wait() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_replace_tensor(self): + tensor_list = [torch.zeros((100,), device="cuda:0")] + future = Future(devices=["cuda:0"]) + future.set_result(tensor_list) + # It's weird to modify the value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + # We set things up so that the original tensor contained in the list + # gets deleted once we replace it with the other one. This will + # invalidate any cached information held by the future. + tensor_list[0] = torch.ones((100,), device="cuda:0") + future.wait() + + @skip_if_lt_x_gpu(1) + def test_rref_with_unpickleable_attributes(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),)) + rref.rpc_sync().increase(1) + ret = rref.rpc_sync().sum() + self.assertEqual(ret, 42) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=True + ) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..352d5d47b479fd400c1ff5ed84defdc8a6efc1d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py @@ -0,0 +1,34 @@ +# mypy: ignore-errors + +import torch.distributed.rpc as rpc +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import ( + tp_transports, +) + + +class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture): + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + _transports=tp_transports() + ) + + def 
get_shutdown_error_regex(self): + # FIXME Once we consolidate the error messages returned by the + # TensorPipe agent, put some more specific regex here. + error_regexes = [".*"] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than"
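
The device-map tests above all follow the same pattern: each worker calls set_device_map(...) on its TensorPipeRpcBackendOptions to declare, per peer, how its local CUDA devices map onto the peer's devices, and only then may CUDA tensors cross an RPC boundary. A minimal standalone sketch of that pattern follows; the worker names, the tcp:// rendezvous address, the single cuda:0-to-cuda:0 mapping, and the mp.spawn launcher are illustrative assumptions, not part of the test file above.

import torch
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp

def run(rank, world_size=2):
    # Illustrative assumptions: worker names "worker0"/"worker1", a local
    # tcp:// rendezvous, and at least one CUDA device visible to each process.
    opts = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:29500")
    peer = f"worker{(rank + 1) % world_size}"
    # Declare that this worker's cuda:0 maps onto the peer's cuda:0; without a
    # device map the TensorPipe backend only supports CPU tensors.
    opts.set_device_map(peer, {0: 0})

    rpc.init_rpc(
        name=f"worker{rank}",
        rank=rank,
        world_size=world_size,
        rpc_backend_options=opts,
    )

    if rank == 0:
        x = torch.ones(2, device="cuda:0")
        # The addition runs on the peer's cuda:0 and the result is mapped back
        # to this worker's cuda:0.
        ret = rpc.rpc_sync(peer, torch.add, args=(x, 1))
        assert torch.equal(ret.cpu(), torch.ones(2) + 1)

    rpc.shutdown()

if __name__ == "__main__":
    mp.spawn(run, nprocs=2)

Each rank sets the map toward its peer, so the reverse mapping needed for the response is available as well; this mirrors how the fixtures above configure their backend options before calling init_rpc.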