diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b70e55b763878dbb373539e7ae65a08daae96851 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/__init__.pyi @@ -0,0 +1,3533 @@ +# @generated from torch/_C/__init__.pyi.in +# mypy: disable-error-code="type-arg" + +import builtins +from enum import Enum, IntEnum +from pathlib import Path +from typing import ( + Any, + AnyStr, + BinaryIO, + Callable, + ContextManager, + Dict, + Generic, + Iterable, + Iterator, + List, + Literal, + NamedTuple, + Optional, + Protocol, + Sequence, + Set, + SupportsIndex, + Tuple, + Type, + TypeVar, + Union, + overload, + runtime_checkable, +) +from typing_extensions import ParamSpec + +import torch +from torch import inf, SymInt, Tensor +from torch.autograd.graph import Node as _Node +from torch.package import PackageExporter +from torch.storage import UntypedStorage, TypedStorage +from torch.types import ( + _bool, + _complex, + _device, + _dispatchkey, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Device, + Number, + Storage, +) + +from torch._prims_common import DeviceLikeType + +# This module is defined in torch/csrc/Module.cpp + +from . import _functorch, _lazy, _lazy_ts_backend, _nn, _onnx, _VariableFunctions, _cpu + +K = TypeVar("K") +T = TypeVar("T") +S = TypeVar("S", bound="torch.Tensor") +P = ParamSpec("P") +ReturnVal = TypeVar("ReturnVal", covariant=True) # return value (always covariant) +_T_co = TypeVar("_T_co", covariant=True) + + +@runtime_checkable +class _NestedSequence(Protocol[_T_co]): + """A protocol for representing nested sequences. + + References:: + `numpy._typing._NestedSequence` + + """ + + def __len__(self, /) -> builtins.int: ... + def __getitem__(self, index: builtins.int, /) -> _T_co | _NestedSequence[_T_co]: ... + def __contains__(self, x: builtins.object, /) -> builtins.bool: ... + def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ... + def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ... + def count(self, value: Any, /) -> builtins.int: ... + def index(self, value: Any, /) -> builtins.int: ... + + +# Defined in torch/csrc/Device.cpp +class device: + type: str # THPDevice_type + index: _int # THPDevice_index + + def __get__(self, instance, owner=None) -> device: ... + + # THPDevice_pynew + @overload + def __init__(self, device: DeviceLikeType) -> None: ... + @overload + def __init__(self, type: str, index: _int) -> None: ... + + # Uncomment if we ever make torch.device a decorator + # def __call__(self, func: T) -> T: ... + + def __enter__(self) -> device: ... + def __exit__(self, exc_type, exc_val, exc_tb) -> None: ... + def __reduce__(self) -> Tuple[Any, ...]: ... # THPDevice_reduce + +# Defined in torch/csrc/Stream.cpp +class Stream: + stream_id: _int # Stream id + device_index: _int + device_type: _int + + device: device # The device of the stream + +# Defined in torch/csrc/Size.cpp +class Size(Tuple[_int, ...]): + # TODO: __reduce__ + + @overload # type: ignore[override] + def __getitem__(self: Size, key: _int) -> _int: ... + @overload + def __getitem__(self: Size, key: slice) -> Size: ... + def numel(self: Size) -> _int: ... + +# Defined in torch/csrc/Dtype.cpp +class dtype: + # TODO: __reduce__ + is_floating_point: _bool + is_complex: _bool + is_signed: _bool + itemsize: _int + def to_real(self) -> dtype: ... + def to_complex(self) -> dtype: ... 
+ +# Defined in torch/csrc/TypeInfo.cpp +class iinfo: + bits: _int + min: _int + max: _int + dtype: str + + def __init__(self, dtype: _dtype) -> None: ... + +class finfo: + bits: _int + min: _float + max: _float + eps: _float + tiny: _float + smallest_normal: _float + resolution: _float + dtype: str + + @overload + def __init__(self, dtype: _dtype) -> None: ... + @overload + def __init__(self) -> None: ... + +float32: dtype = ... +float: dtype = ... +float64: dtype = ... +double: dtype = ... +float16: dtype = ... +bfloat16: dtype = ... +float8_e4m3fn: dtype = ... +float8_e4m3fnuz: dtype = ... +float8_e5m2: dtype = ... +float8_e5m2fnuz: dtype = ... +half: dtype = ... +uint8: dtype = ... +int8: dtype = ... +int16: dtype = ... +short: dtype = ... +int32: dtype = ... +int: dtype = ... +int64: dtype = ... +long: dtype = ... +complex32: dtype = ... +complex64: dtype = ... +chalf: dtype = ... +cfloat: dtype = ... +complex128: dtype = ... +cdouble: dtype = ... +quint8: dtype = ... +qint8: dtype = ... +qint32: dtype = ... +bool: dtype = ... +quint4x2: dtype = ... +quint2x4: dtype = ... +bits1x8: dtype = ... +bits2x4: dtype = ... +bits4x2: dtype = ... +bits8: dtype = ... +bits16: dtype = ... + +# Defined in torch/csrc/Layout.cpp +class layout: ... + +# Defined in torch/csrc/utils/disable_torch_function.cpp +def DisableTorchFunction(): ... +def DisableTorchFunctionSubclass(): ... + +# Defined in torch/csrc/utils/tensor_layouts.cpp +strided: layout = ... +sparse_coo: layout = ... +sparse_csr: layout = ... +sparse_csc: layout = ... +sparse_bsr: layout = ... +sparse_bsc: layout = ... +_mkldnn: layout = ... +jagged: layout = ... + +# Defined in torch/csrc/MemoryFormat.cpp +class memory_format: ... + +# Defined in torch/csrc/utils/tensor_memoryformats.cpp +contiguous_format: memory_format = ... +channels_last: memory_format = ... +channels_last_3d: memory_format = ... +preserve_format: memory_format = ... + +# Defined in torch/csrc/QScheme.cpp +class qscheme: ... + +# Defined in torch/csrc/utils/tensor_qschemes.h +per_tensor_affine: qscheme = ... +per_channel_affine: qscheme = ... +per_tensor_symmetric: qscheme = ... +per_channel_symmetric: qscheme = ... +per_channel_affine_float_qparams: qscheme = ... + +# Defined in torch/csrc/autograd/python_function.cpp +class _FunctionBase: + saved_tensors: Tuple[Tensor] + _raw_saved_tensors: Tuple[Any] + next_functions: Tuple[Tuple[Any, _int], ...] + needs_input_grad: Tuple[_bool] + metadata: dict + _materialize_non_diff_grads: _bool + # skip adding type hints for the fields that have wrappers defined + # in torch/autograd/function.py + +# Defined in torch/csrc/autograd/python_legacy_variable.cpp +class _LegacyVariableBase(Tensor): # inherits from Tensor to appease mypy + def __init__( + self, + data: Optional[Tensor] = ..., + requires_grad: Optional[_bool] = ..., + volatile: Optional[_bool] = ..., + _grad_fn: Optional[_FunctionBase] = ..., + ) -> None: ... + +# Defined in torch/csrc/jit/python/init.cpp +class IODescriptor: ... +class JITException: ... + +class Future(Generic[T]): + def __init__(self, devices: List[device]) -> None: ... + def done(self) -> _bool: ... + def value(self) -> T: ... + def wait(self) -> T: ... + def add_done_callback(self, callback: Callable) -> None: ... + def then(self, callback: Callable) -> Future[T]: ... + def set_result(self, result: T) -> None: ... + def _set_unwrap_func(self, callback: Callable) -> None: ... + +class _Await: + def __init__(self) -> None: ... + def fn(self) -> Callable: ... + def args(self) -> Tuple[Any, ...]: ... 
+ def is_nowait(self) -> _bool: ... + +def _jit_set_num_profiled_runs(num: _size) -> _size: ... + +# Defined in torch/csrc/jit/passes/mobile_optimizer_type.h +class _MobileOptimizerType: ... + +CONV_BN_FUSION: _MobileOptimizerType +INSERT_FOLD_PREPACK_OPS: _MobileOptimizerType +REMOVE_DROPOUT: _MobileOptimizerType +FUSE_ADD_RELU: _MobileOptimizerType +HOIST_CONV_PACKED_PARAMS: _MobileOptimizerType +VULKAN_AUTOMATIC_GPU_TRANSFER: _MobileOptimizerType + +def fork(*args: Any, **kwargs: Any) -> Future: ... +def wait(fut: Future) -> Any: ... +def _awaitable(*args: Any, **kwargs: Any) -> _Await: ... +def _awaitable_wait(aw: _Await) -> Any: ... +def _awaitable_nowait(x: Any) -> _Await: ... +def _collect_all(futures: List[Future]) -> Future: ... +def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ... +def unify_type_list(types: List[JitType]) -> JitType: ... +def _freeze_module( + module: ScriptModule, + preserved_attrs: List[str] = [], + freeze_interfaces: _bool = True, + preserveParameters: _bool = True, +) -> ScriptModule: ... +def _jit_pass_optimize_frozen_graph(Graph, optimize_numerics: _bool = True) -> None: ... +def _jit_pass_optimize_for_inference( + module: torch.jit.ScriptModule, + other_methods: List[str] = [], +) -> None: ... +def _jit_pass_fold_frozen_conv_bn(graph: Graph): ... +def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ... +def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ... +def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ... +def _jit_pass_concat_frozen_linear(graph: Graph): ... +def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ... +def _jit_pass_transpose_frozen_linear(graph: Graph): ... +def _jit_pass_remove_dropout(module: torch.jit.ScriptModule): ... +def _is_tracing() -> _bool: ... +def _jit_init() -> _bool: ... +def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ... +def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ... +def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ... +def _get_operation_overload( + op_name: str, + op_overload_name: str, +) -> Tuple[Callable, Callable, List[Any]]: ... +def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ... +def _jit_pass_optimize_for_mobile( + module: torch.jit.ScriptModule, + optimization_blocklist: Set[_MobileOptimizerType], + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _clone_module_with_class( + module: torch.jit.ScriptModule, + ignored_methods: List[AnyStr], + ignored_attributes: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_vulkan_optimize_for_mobile( + module: torch.jit.ScriptModule, + optimization_blocklist: Set[_MobileOptimizerType], + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_metal_optimize_for_mobile( + module: torch.jit.ScriptModule, + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_inline(Graph) -> None: ... +def _jit_pass_constant_propagation(Graph) -> None: ... +def _jit_pass_propagate_shapes_on_graph(Graph) -> None: ... +def _jit_register_decomposition_for_schema(schema: FunctionSchema, Graph) -> None: ... +def _jit_erase_non_input_shape_information(Graph) -> None: ... +def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ... +def _jit_get_all_schemas() -> List[FunctionSchema]: ... +def _jit_check_alias_annotation( + g: Graph, + args: Tuple[Any, ...], + unqualified_op_name: str, +): ... +def _jit_can_fuse_on_cpu() -> _bool: ... 
+def _jit_can_fuse_on_gpu() -> _bool: ... +def _jit_can_fuse_on_cpu_legacy() -> _bool: ... +def _debug_get_fusion_group_inlining() -> _bool: ... +def _debug_set_fusion_group_inlining(enable: _bool): ... +def _jit_texpr_fuser_enabled() -> _bool: ... +def _jit_nvfuser_enabled() -> _bool: ... +def _jit_llga_enabled() -> _bool: ... +def _jit_set_llga_enabled(enable: _bool): ... +def _llvm_enabled() -> _bool: ... +def _jit_override_can_fuse_on_cpu(override: _bool): ... +def _jit_override_can_fuse_on_gpu(override: _bool): ... +def _jit_override_can_fuse_on_cpu_legacy(override: _bool): ... +def _jit_set_symbolic_shapes_test_mode(override: _bool): ... +def _jit_symbolic_shapes_test_mode_enabled() -> _bool: ... +def _jit_set_texpr_fuser_enabled(enable: _bool): ... +def _jit_set_te_must_use_llvm_cpu(use_llvm: _bool): ... +def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ... +def _jit_cat_wo_conditionals(optimize_cat: _bool): ... +def _jit_opt_conditionals(opt_conds: _bool): ... +def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ... +def _jit_pass_erase_shape_information(graph: Graph): ... +def _jit_pass_fold_convbn(module: torch.jit.ScriptModule): ... +def _jit_pass_insert_observers( + module: torch.jit.ScriptModule, + method_name: str, + qconfig_dict: Dict[str, Any], + inplace: _bool, + quant_type: _int, +): ... +def _jit_pass_insert_quant_dequant( + module: torch.jit.ScriptModule, + method_name: str, + inplace: _bool, + debug: _bool, + quant_type: _int, +): ... +def _jit_pass_insert_quant_dequant_for_ondevice_ptq( + module: torch.jit.ScriptModule, + method_name: str, + inplace: _bool, + debug: _bool, + quant_type: _int, +): ... +def _jit_pass_quant_finalize( + module: torch.jit.ScriptModule, + quant_type: _int, + preserved_attrs: Sequence[str], +): ... +def _jit_pass_quant_finalize_for_ondevice_ptq( + module: torch.jit.ScriptModule, + quant_type: _int, + method_name: str, +): ... +def _jit_pass_insert_observer_method_for_ondevice_ptq( + module: torch.jit.ScriptModule, + method_name: str, + qconfig_dict: Dict[str, Any], + inplace: _bool, + quant_type: _int, +): ... +def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ... +def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ... +def _jit_set_fusion_strategy( + strategy: List[Tuple[str, _int]], +) -> List[Tuple[str, _int]]: ... +def _jit_try_infer_type(obj: Any) -> InferredType: ... +def _jit_get_trigger_value(trigger_name: str) -> _int: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +ResolutionCallback = Callable[[str], Callable[..., Any]] + +# Defined in torch/csrc/jit/python/script_init.cpp +# and torch/csrc/jit/python/init.cpp +def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ... +def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ... +def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ... +def _jit_assert_is_instance(obj: Any, type: JitType): ... +def _jit_clear_class_registry() -> None: ... +def _jit_set_emit_hooks( + ModuleHook: Optional[Callable], + FunctionHook: Optional[Callable], +) -> None: ... +def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ... +def _load_for_lite_interpreter( + filename: Union[str, Path], + map_location: Optional[DeviceLikeType], +): ... +def _load_for_lite_interpreter_from_buffer( + buffer: BinaryIO, + map_location: Optional[DeviceLikeType], +): ... +def _export_operator_list(module: LiteScriptModule): ... 
+def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ... +def _get_model_bytecode_version(filename: Union[str, Path]) -> _int: ... +def _get_model_bytecode_version_from_buffer(buffer: BinaryIO) -> _int: ... +def _backport_for_mobile( + filename_input: Union[str, Path], + filename_output: Union[str, Path], + to_version: _int, +) -> None: ... +def _backport_for_mobile_from_buffer( + buffer: BinaryIO, + filename_output: Union[str, Path], + to_version: _int, +) -> None: ... +def _backport_for_mobile_to_buffer( + filename_input: Union[str, Path], + to_version: _int, +) -> bytes: ... +def _backport_for_mobile_from_buffer_to_buffer( + buffer: BinaryIO, + to_version: _int, +) -> bytes: ... +def _get_model_ops_and_info(filename: Union[str, Path]): ... +def _get_model_ops_and_info_from_buffer(buffer: BinaryIO): ... +def _get_mobile_model_contained_types(filename: Union[str, Path]): ... +def _get_mobile_model_contained_types_from_buffer(buffer: BinaryIO): ... +def _logging_set_logger(logger: LoggerBase) -> LoggerBase: ... +def _get_graph_executor_optimize(optimize: Optional[_bool] = None) -> _bool: ... +def _set_graph_executor_optimize(optimize: _bool): ... +def _export_opnames(module: ScriptModule) -> List[str]: ... +def _create_function_from_trace( + qualname: str, + func: Callable[..., Any], + input_tuple: Tuple[Any, ...], + var_lookup_fn: Callable[[Tensor], str], + strict: _bool, + force_outplace: _bool, + argument_names: List[str], +) -> Tuple[Graph, Stack]: ... +def _create_function_from_trace_with_dict( + qualname: str, + func: Callable[..., Any], + input_dict: Dict[str, Any], + var_lookup_fn: Callable[[Tensor], str], + strict: _bool, + force_outplace: _bool, + argument_names: List[str], +) -> Tuple[Graph, Stack]: ... +def _jit_is_script_object(obj: Any) -> _bool: ... +def _last_executed_optimized_graph() -> Graph: ... +def parse_type_comment(comment: str) -> Decl: ... +def _get_upgraders_map_size() -> _int: ... +def _get_upgraders_entry_map() -> Dict[str, str]: ... +def _dump_upgraders_map() -> Dict[str, str]: ... +def _test_only_populate_upgraders(content: Dict[str, str]) -> None: ... +def _test_only_remove_upgraders(content: Dict[str, str]) -> None: ... +def merge_type_from_type_comment( + decl: Decl, + type_annotation_decl: Decl, + is_method: _bool, +) -> Decl: ... +def parse_ir(input: str, parse_tensor_constants: _bool = False) -> Graph: ... +def parse_schema(schema: str) -> FunctionSchema: ... +def get_device(input: Tensor) -> _int: ... +def _resolve_type_from_object( + obj: Any, + range: SourceRange, + rcb: ResolutionCallback, +) -> JitType: ... +def _create_module_with_type(ty: JitType) -> ScriptModule: ... +def _create_object_with_type(ty: ClassType) -> ScriptObject: ... +def _run_emit_module_hook(m: ScriptModule): ... +def _replace_overloaded_method_decl( + overload_decl: Decl, + implementation_def: Def, + new_name: str, +) -> Def: ... +def _jit_pass_lower_all_tuples(graph: Graph) -> None: ... +def _jit_pass_onnx_set_dynamic_input_shape( + graph: Graph, + dynamic_axes: Dict[str, Dict[_int, str]], + input_names: List[str], +) -> None: ... +def _jit_pass_onnx_graph_shape_type_inference( + graph: Graph, + params_dict: Dict[str, IValue], + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_assign_output_shape( + graph: Graph, + tensors: List[Tensor], + desc: IODescriptor, + onnx_shape_inference: _bool, + is_script: _bool, + opset_version: _int, +) -> None: ... 
+def _jit_pass_onnx_remove_inplace_ops_for_onnx( + graph: Graph, + module: Optional[ScriptModule] = None, +) -> None: ... +def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ... +def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ... +def _jit_pass_peephole( + graph: Graph, + disable_shape_peepholes: _bool = False, +) -> None: ... +def _jit_pass_onnx_autograd_function_process(graph: Graph) -> None: ... +def _jit_pass_fuse_addmm(graph: Graph) -> None: ... +def _jit_pass_onnx_preprocess(graph: Graph) -> None: ... +def _jit_pass_prepare_division_for_onnx(graph: Graph) -> None: ... +def _jit_pass_onnx_remove_print(graph: Graph) -> None: ... +def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ... +def _jit_pass_onnx_unpack_quantized_weights( + graph: Graph, + paramsDict: Dict[str, IValue], + caffe2: _bool, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_quantization_insert_permutes( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... +def _jit_pass_custom_pattern_based_rewrite_graph( + pattern: str, + fused_node_name: str, + graph: Graph, +) -> None: ... +def _jit_onnx_list_model_parameters( + module: ScriptModule, +) -> Tuple[ScriptModule, List[IValue]]: ... +def _jit_pass_erase_number_types(graph: Graph) -> None: ... +def _jit_pass_onnx_lint(graph: Graph) -> None: ... +def _jit_pass_onnx( + graph: Graph, + _jit_pass_onnx: _onnx.OperatorExportTypes, +) -> Graph: ... +def _jit_pass_onnx_scalar_type_analysis( + graph: Graph, + lowprecision_cast: _bool, + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_peephole( + graph: Graph, + opset_version: _int, + fixed_batch_size: _bool, +) -> None: ... +def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ... +def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ... +def _jit_pass_onnx_function_extraction( + graph: Graph, + module_names: Set[str], + param_names: List[str], +) -> Dict[Node, Dict[str, str]]: ... +def _jit_pass_onnx_clear_scope_records() -> None: ... +def _jit_pass_onnx_track_scope_attributes( + graph: Graph, + onnx_attrs: Dict[str, Any], +) -> None: ... +def _jit_is_onnx_log_enabled() -> _bool: ... +def _jit_set_onnx_log_enabled(enabled: _bool) -> None: ... +def _jit_set_onnx_log_output_stream(stream_name: str) -> None: ... +def _jit_onnx_log(*args: Any) -> None: ... +def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ... +def _jit_pass_inline_fork_wait(graph: Graph) -> None: ... +def _jit_pass_onnx_deduplicate_initializers( + graph: Graph, + params_dict: Dict[str, IValue], + is_train: _bool, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_eval_peephole( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_constant_fold( + graph: Graph, + paramsDict: Dict[str, IValue], + opset_version: _int, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_eliminate_unused_items( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ... +def _jit_pass_filter_non_tensor_arguments( + params: Dict[str, IValue], +) -> Dict[str, Tensor]: ... +def _jit_decay_packed_param_input_types(graph: Graph) -> None: ... +def _jit_pass_onnx_node_shape_type_inference( + n: Node, + paramsDict: Dict[str, IValue], + opset_version: _int, +) -> None: ... +def _jit_onnx_convert_pattern_from_subblock( + block: Block, + n: Node, + env: Dict[Value, Value], +) -> List[Value]: ... 
+def _jit_pass_onnx_block( + old_block: Block, + new_block: Block, + operator_export_type: _onnx.OperatorExportTypes, + env: Dict[Value, Value], + is_sub_block: _bool, +) -> Dict[Value, Value]: ... +def _jit_pass_onnx_assign_scoped_names_for_node_and_value(graph: Graph) -> None: ... +def _jit_pass_fixup_onnx_controlflow_node( + n: Node, + opset_version: _int, +) -> List[Value]: ... +def _jit_onnx_create_full_scope_name(class_name: str, variable_name: str) -> str: ... +def _compile_graph_to_code_table(name: str, graph: Graph) -> IValue: ... +def _generate_upgraders_graph() -> Dict[str, Graph]: ... +def _calculate_package_version_based_on_upgraders(val: _bool): ... +def _get_version_calculator_flag() -> _bool: ... +def _jit_script_interface_compile( + name: str, + class_def: ClassDef, + rcb: ResolutionCallback, + is_module: _bool, +): ... +def _jit_script_compile_overload( + qualname: str, + overload_decl: Decl, + implementation_def: Def, + rcb: ResolutionCallback, + implementation_defaults: Dict[str, Any], + signature: Any, +): ... +def _jit_script_compile( + qual_name: str, + definition: Def, + rcb: ResolutionCallback, + defaults: Dict[str, Any], +): ... +def _jit_script_class_compile( + qual_name: str, + definition: ClassDef, + defaults: Dict[str, Dict[str, Any]], + rcb: ResolutionCallback, +): ... +def _parse_source_def(src: str) -> Def: ... +def import_ir_module( + cu: CompilationUnit, + filename: Union[str, Path], + map_location: Optional[DeviceLikeType], + extra_files: Dict[str, Any], +) -> ScriptModule: ... +def import_ir_module_from_buffer( + cu: CompilationUnit, + buffer: BinaryIO, + map_location: Optional[DeviceLikeType], + extra_files: Dict[str, Any], +) -> ScriptModule: ... +def _import_ir_module_from_package( + cu: CompilationUnit, + reader: PyTorchFileReader, + storage_context: DeserializationStorageContext, + map_location: Optional[DeviceLikeType], + ts_id: str, +) -> ScriptModule: ... +def _assign_output_shapes(graph: Graph, inputs: List[Tensor]) -> Graph: ... +def _check_onnx_proto(proto: str) -> None: ... +def _propagate_and_assign_input_shapes( + graph: Graph, + inputs: Tuple[Tensor, ...], + param_count_list: List[_int], + with_grad: _bool, + propagate: _bool, +) -> Graph: ... + +# Defined in torch/csrc/jit/runtime/graph_executor.h +class GraphExecutorState: ... + +# Defined in torch/torch/csrc/jit/ir/alias_analysis.h +class AliasDb: + def __str__(self) -> str: ... + +class _InsertPoint: + def __enter__(self) -> None: ... + def __exit__(self, *args) -> None: ... + +# Defined in torch/csrc/jit/ir/ir.h +class Use: + @property + def user(self) -> Node: ... + @property + def offset(self) -> _int: ... + def isAfter(self, other: Use) -> _bool: ... + +# Defined in torch/csrc/jit/ir/ir.h +class Value: + def type(self) -> JitType: ... + def setType(self, t: JitType) -> Value: ... + def setTypeAs(self, other: Value) -> Value: ... + def inferTypeFrom(self, t: Tensor) -> None: ... + def debugName(self) -> str: ... + def setDebugName(self, name: str) -> None: ... + def unique(self) -> _int: ... + def offset(self) -> _int: ... + def node(self) -> Node: ... + def uses(self) -> List[Use]: ... + def replaceAllUsesWith(self, val: Value) -> None: ... + def replaceAllUsesAfterNodeWith(self, node: Node, val: Value) -> None: ... + def requires_grad(self) -> _bool: ... + def requiresGrad(self) -> _bool: ... + def copyMetadata(self, other: Value) -> Value: ... + def isCompleteTensor(self) -> _bool: ... + def toIValue(self) -> IValue: ... 
+ +# Defined in torch/csrc/jit/ir/ir.h +class Block: + def inputs(self) -> Iterator[Value]: ... + def outputs(self) -> Iterator[Value]: ... + def nodes(self) -> Iterator[Node]: ... + def paramNode(self) -> Node: ... + def returnNode(self) -> Node: ... + def owningNode(self) -> Node: ... + def registerOutput(self, n: Value) -> _int: ... + def addNode(self, name: str, inputs: Sequence[Value]) -> Node: ... + +# Defined in torch/csrc/jit/ir/ir.h +class Node: + def __getitem__(self, key: str) -> Any: ... + def schema(self) -> str: ... + def input(self) -> Value: ... + def inputs(self) -> Iterator[Value]: ... + def inputsAt(self, idx: _int) -> Value: ... + def inputsSize(self) -> _int: ... + def output(self) -> Value: ... + def outputs(self) -> Iterator[Value]: ... + def outputsAt(self, idx: _int) -> Value: ... + def outputsSize(self) -> _int: ... + def hasMultipleOutputs(self) -> _bool: ... + def blocks(self) -> List[Block]: ... + def addBlock(self) -> Block: ... + def mustBeNone(self) -> _bool: ... + def matches(self, pattern: str) -> _bool: ... + def kind(self) -> str: ... + def kindOf(self, name: str) -> str: ... + def addInput(self, name: str) -> Value: ... + def replaceInput(self, i: _int, newValue: Value) -> Value: ... + def replaceInputWith(self, from_: Value, to: Value) -> None: ... + def replaceAllUsesWith(self, n: Node) -> None: ... + def insertBefore(self, n: Node) -> Node: ... + def insertAfter(self, n: Node) -> Node: ... + def isBefore(self, n: Node) -> _bool: ... + def isAfter(self, n: Node) -> _bool: ... + def moveBefore(self, n: Node) -> None: ... + def moveAfter(self, n: Node) -> None: ... + def removeInput(self, i: _int) -> None: ... + def removeAllInputs(self, i: _int) -> None: ... + def hasUses(self) -> _bool: ... + def eraseOutput(self, i: _int) -> None: ... + def addOutput(self) -> Value: ... + def scopeName(self) -> str: ... + def isNondeterministic(self) -> _bool: ... + def copyAttributes(self, rhs: Node) -> Node: ... + def copyMetadata(self, rhs: Node) -> Node: ... + def hasAttributes(self) -> _bool: ... + def hasAttribute(self, name: str) -> _bool: ... + def removeAttribute(self, attr: str) -> Node: ... + def namedInput(self, name: str) -> Value: ... + def sourceRange(self) -> SourceRange: ... + def owningBlock(self) -> Block: ... + def findNode(self, kind: str, recurse: _bool = True) -> Node: ... + def findAllNodes(self, kind: str, recurse: _bool = True) -> List[Node]: ... + def getModuleHierarchy(self) -> str: ... + def prev(self) -> Node: ... + def destroy(self) -> None: ... + def attributeNames(self) -> List[str]: ... + + # Accessors for attributes as types. + def f(self, name: str) -> _float: ... + def f_(self, name: str, val: _float) -> Node: ... + def fs(self, name: str) -> List[_float]: ... + def fs_(self, name: str, val: List[_float]) -> Node: ... + def c(self, name: str) -> complex: ... + def c_(self, name: str, val: complex) -> Node: ... + def s(self, name: str) -> str: ... + def s_(self, name: str, val: str) -> Node: ... + def ss(self, name: str) -> List[str]: ... + def ss_(self, name: str, val: List[str]) -> Node: ... + def i(self, name: str) -> _int: ... + def i_(self, name: str, val: _int) -> Node: ... + # Cannot define "is" like this because it's a reserved keyword in python. + # def is(self, name: str) -> List[_int]: ... + # def is_(self, name: str, val: List[_int]) -> Node: ... + def g(self, name: str) -> Graph: ... + def g_(self, name: str, val: Graph) -> Node: ... + def gs(self, name: str) -> List[Graph]: ... 
+ def gs_(self, name: str, val: List[Graph]) -> Node: ... + def ival(self, name: str) -> IValue: ... + def ival_(self, name: str, val: IValue) -> Node: ... + def t(self, name: str) -> Tensor: ... + def t_(self, name: str, val: Tensor) -> Node: ... + def ts(self, name: str) -> List[Tensor]: ... + def ts_(self, name: str, val: List[Tensor]) -> Node: ... + def ty(self, name: str) -> JitType: ... + def ty_(self, name: str, val: JitType) -> Node: ... + def tys(self, name: str) -> List[JitType]: ... + def tys_(self, name: str, val: List[JitType]) -> Node: ... + +# Defined in torch/torch/csrc/jit/ir/ir.h +class Graph: + def inputs(self) -> Iterator[Value]: ... + def outputs(self) -> Iterator[Value]: ... + def nodes(self) -> Iterator[Node]: ... + def param_node(self) -> Node: ... + def return_node(self) -> Node: ... + def addInput(self, name: str = "") -> Value: ... + def eraseInput(self, i: _int) -> None: ... + def registerOutput(self, n: Value) -> _int: ... + def eraseOutput(self, i: _int) -> None: ... + def create(self, name: str, args, num_outputs: _int) -> Node: ... + def appendNode(self, n: Node) -> Node: ... + def prependNode(self, n: Node) -> Node: ... + def insertNode(self, n: Node) -> Node: ... + def block(self) -> Block: ... + def lint(self) -> None: ... + def alias_db(self) -> AliasDb: ... + def setInsertPoint(self, n: Union[Block, Node]) -> None: ... + def insert_point_guard(self, n: Union[Block, Node]) -> _InsertPoint: ... + def insertPoint(self) -> Node: ... + def insertGraph(self, callee: Graph, inputs: List[Value]) -> List[Value]: ... + def makeMultiOutputIntoTuple(self) -> None: ... + def copy(self) -> Graph: ... + +# Defined in torch/aten/src/ATen/core/alias_info.h +class AliasInfo: + is_write: _bool + before_set: Set[str] + after_set: Set[str] + +# Defined in torch/aten/src/ATen/core/function_schema.h +class Argument: + name: str + type: JitType + default_value: Optional[Any] + def has_default_value(self) -> _bool: ... + kwarg_only: _bool + is_out: _bool + alias_info: Optional[AliasInfo] + +class FunctionSchema: + arguments: List[Argument] + returns: List[Argument] + name: str + overload_name: str + +class _UpgraderEntry: + bumped_at_version: _int + upgrader_name: str + old_schema: str + def __init__( + self, + bumped_at_version: _int, + upgrader_name: str, + old_schema: str, + ) -> None: ... + +class _UpgraderRange: + min_version: _int + max_version: _int + +def _get_max_operator_version() -> _int: ... +def _get_operator_version_map() -> Dict[str, List[_UpgraderEntry]]: ... +def _get_upgrader_ranges(name: str) -> List[_UpgraderRange]: ... +def _test_only_add_entry_to_op_version(op_name: str, entry: _UpgraderEntry) -> None: ... +def _test_only_remove_entry_to_op_version(op_name: str) -> None: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class ScriptModuleSerializer: + def __init__(self, export_writer: PyTorchFileWriter) -> None: ... + def serialize(self, model: ScriptModule, script_module_id: _int) -> None: ... + def write_files(self) -> None: ... + def storage_context(self) -> SerializationStorageContext: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class SerializationStorageContext: + def __init__(self) -> None: ... + def has_storage(self, storage: Storage) -> _bool: ... + def get_or_add_storage(self, storage: Storage) -> _int: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class DeserializationStorageContext: + def __init__(self) -> None: ... + def get_storage(self, name: str, dtype: _dtype) -> Tensor: ... 
+ def has_storage(self, name: str) -> _bool: ... + def add_storage(self, name: str, tensor: Tensor) -> _int: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class ConcreteModuleTypeBuilder: + def __init__(self, obj: Any) -> None: ... + def set_module_dict(self): ... + def set_module_list(self): ... + def set_parameter_list(self): ... + def set_parameter_dict(self): ... + def add_attribute( + self, + name: str, + ty: JitType, + is_param: _bool, + is_buffer: _bool, + ): ... + def add_module(self, name: str, meta: ConcreteModuleType): ... + def add_constant(self, name: str, value: Any): ... + def add_overload(self, method_name: str, overloaded_method_names: List[str]): ... + def add_builtin_function(self, name: str, symbol_name: str): ... + def add_failed_attribute(self, name: str, failure_reason: str): ... + def add_function_attribute( + self, + name: str, + ty: JitType, + func: Callable[..., Any], + ): ... + def add_ignored_attribute(self, name: str): ... + def add_ignored_attributes(self, names: List[str]): ... + def add_forward_hook(self, hook: Callable[..., Any]): ... + def add_forward_pre_hook(self, pre_hook: Callable[..., Any]): ... + +class ConcreteModuleType: + def get_constants(self) -> Dict[str, Any]: ... + def equals(self, other: ConcreteModuleType) -> _bool: ... + @staticmethod + def from_jit_type(ty: JitType) -> ConcreteModuleType: ... + +class CallStack: + def __init__(self, name: str, range: SourceRange): ... + +class ErrorReport: + def __init__(self, range: SourceRange) -> None: ... + def what(self) -> str: ... + @staticmethod + def call_stack() -> str: ... + +class CompilationUnit: + def __init__(self, lang: str = ..., _frames_up: _int = ...) -> None: ... + def find_function(self, name: str) -> ScriptFunction: ... + def __getattr__(self, name: str) -> ScriptFunction: ... + def define( + self, + script: str, + rcb: ResolutionCallback = ..., + _frames_up: _int = ..., + ): ... + def get_interface(self, name: str) -> InterfaceType: ... + def get_functions(self) -> List[ScriptFunction]: ... + def create_function( + self, + name: str, + graph: Graph, + shouldMangle: _bool = ..., + ) -> ScriptFunction: ... + def get_class(self, name: str) -> ClassType: ... + +class ScriptObject: + def setattr(self, name: str, value: Any): ... + +class ScriptModule(ScriptObject): + def _method_names(self) -> List[str]: ... + def _get_method(self, name: str) -> ScriptMethod: ... + +class LiteScriptModule: + def __call__(self, *input): ... + def find_method(self, method_name: str): ... + def forward(self, *input) -> List[str]: ... + def run_method(self, method_name: str, *input): ... + +# NOTE: switch to collections.abc.Callable in python 3.9 +class ScriptFunction(Generic[P, ReturnVal]): + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ... + def save(self, filename: str, _extra_files: Dict[str, bytes]) -> None: ... + def save_to_buffer(self, _extra_files: Dict[str, bytes]) -> bytes: ... + @property + def graph(self) -> Graph: ... + def inlined_graph(self) -> Graph: ... + def schema(self) -> FunctionSchema: ... + def code(self) -> str: ... + def name(self) -> str: ... + @property + def qualified_name(self) -> str: ... + +# NOTE: switch to collections.abc.Callable in python 3.9 +class ScriptMethod(Generic[P, ReturnVal]): + graph: Graph + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ... + @property + def owner(self) -> ScriptModule: ... + @property + def name(self) -> str: ... 
+ +class ScriptDict(Generic[K, T]): + def __init__(self, dict: Dict[K, T]) -> None: ... + def __len__(self) -> _int: ... + def __contains__(self, key: K) -> _bool: ... + def __getitem__(self, key: K) -> T: ... + def __setitem__(self, key: K, value: T) -> None: ... + def __delitem__(self, key: K) -> None: ... + def __iter__(self) -> Iterator[K]: ... + def items(self) -> Iterator[tuple[K, T]]: ... + def keys(self) -> Iterator[K]: ... + +class ScriptList(Generic[T]): + def __init__(self, list: List[T]) -> None: ... + def __len__(self) -> _int: ... + def __contains__(self, item: T) -> _bool: ... + @overload + def __getitem__(self, idx: _int) -> T: ... + @overload + def __getitem__(self, idx: slice) -> ScriptList[T]: ... + @overload + def __setitem__(self, idx: _int, value: T) -> None: ... + @overload + def __setitem__(self, idx: slice, value: List[T]) -> None: ... + def __delitem__(self, idx: _int) -> None: ... + def __iter__(self) -> Iterator[T]: ... + def count(self, value: T) -> _int: ... + def remove(self, value: T) -> None: ... + def append(self, value: T) -> None: ... + def clear(self) -> None: ... + @overload + def extend(self, values: List[T]) -> None: ... + @overload + def extend(self, values: Iterable[T]) -> None: ... + @overload + def pop(self) -> T: ... + @overload + def pop(self, idx: _int) -> T: ... + +class ModuleDict: + def __init__(self, mod: ScriptModule) -> None: ... + def items(self) -> List[Tuple[str, Any]]: ... + +class ParameterDict: + def __init__(self, mod: ScriptModule) -> None: ... + +class BufferDict: + def __init__(self, mod: ScriptModule) -> None: ... + +# Defined in torch/csrc/jit/api/module.h +class Module: ... + +# Defined in torch/csrc/Module.cpp +def _initExtension(shm_manager_path: str) -> None: ... # THPModule_initExtension +def _autograd_init() -> _bool: ... # THPAutograd_initExtension +def _add_docstr(obj: T, doc_obj: str) -> T: ... # THPModule_addDocStr +def _init_names(arg: Sequence[Type]) -> None: ... # THPModule_initNames +def _has_distributed() -> _bool: ... # THPModule_hasDistributed +def _set_default_tensor_type(type) -> None: ... # THPModule_setDefaultTensorType +def _set_default_dtype(d: _dtype) -> None: ... # THPModule_setDefaultDtype +def _infer_size(arg1: Size, arg2: Size) -> Size: ... # THPModule_inferSize +def _crash_if_csrc_asan() -> _int: ... # THPModule_crashIfCsrcASAN +def _crash_if_csrc_ubsan() -> _int: ... # THPModule_crashIfCsrcUBSAN +def _crash_if_aten_asan() -> _int: ... # THPModule_crashIfATenASAN +def _show_config() -> str: ... # THPModule_showConfig +def _cxx_flags() -> str: ... # THPModule_cxxFlags +def _parallel_info() -> str: ... # THPModule_parallelInfo +def _get_cpu_capability() -> str: ... # THPModule_getCpuCapability +def _set_backcompat_broadcast_warn( + arg: _bool, +) -> None: ... # THPModule_setBackcompatBroadcastWarn +def _get_backcompat_broadcast_warn() -> _bool: ... # THPModule_getBackcompatBroadcastWarn +def _set_backcompat_keepdim_warn( + arg: _bool, +) -> None: ... # THPModule_setBackcompatKeepdimWarn +def _get_backcompat_keepdim_warn() -> _bool: ... # THPModule_getBackcompatKeepdimWarn +def get_num_thread() -> _int: ... # THPModule_getNumThreads +def set_num_threads(nthreads: _int) -> None: ... # THPModule_setNumThreads +def get_num_interop_threads() -> _int: ... # THPModule_getNumInteropThreads +def set_num_interop_threads( + nthreads: _int, +) -> None: ... # THPModule_setNumInteropThreads +def _get_cudnn_enabled() -> _bool: ... # THPModule_userEnabledCuDNN +def _set_cudnn_enabled(arg: _bool) -> None: ... 
# THPModule_setUserEnabledCuDNN +def _get_flash_sdp_enabled() -> _bool: ... # THPModule_userEnabledFusedSDP +def _set_sdp_use_flash(arg: _bool) -> None: ... # THPModule_setSDPUseFlash +def _get_mem_efficient_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP +def _set_sdp_use_mem_efficient( + arg: _bool, +) -> None: ... # THPModule_setSDPUseMemEfficient +def _get_math_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP +def _set_sdp_use_math(arg: _bool) -> None: ... # THPModule_setSDPUseMath +def _get_mkldnn_enabled() -> _bool: ... # THPModule_userEnabledMkldnn +def _set_mkldnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledMkldnn +def _get_cudnn_benchmark() -> _bool: ... # THPModule_benchmarkCuDNN +def _set_cudnn_benchmark(arg: _bool) -> None: ... # THPModule_setBenchmarkCuDNN +def _get_cudnn_deterministic() -> _bool: ... # THPModule_deterministicCuDNN +def _set_cudnn_deterministic(arg: _bool) -> None: ... # THPModule_setDeterministicCuDNN +def _get_deterministic_algorithms() -> _bool: ... # THPModule_deterministicAlgorithms +def _get_deterministic_algorithms_warn_only() -> _bool: ... # THPModule_deterministicAlgorithmsWarnOnly +def _set_deterministic_algorithms( + mode: _bool, + *, + warn_only: _bool = ..., +) -> None: ... # THPModule_setDeterministicAlgorithms +def _get_deterministic_fill_uninitialized_memory() -> _bool: ... # THPModule_deterministicFillUninitializedMemory +def _set_deterministic_fill_uninitialized_memory(arg: _bool) -> None: ... # THPModule_setDeterministicFillUninitializedMemory +def _get_warnAlways() -> _bool: ... # THPModule_warnAlways +def _set_warnAlways(arg: _bool) -> None: ... # THPModule_setWarnAlways +def _get_cudnn_allow_tf32() -> _bool: ... # THPModule_allowTF32CuDNN +def _set_cudnn_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuDNN +def _get_cublas_allow_tf32() -> _bool: ... # THPModule_allowTF32CuBLAS +def _set_cublas_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuBLAS +def _get_float32_matmul_precision() -> str: ... # THPModule_float32MatmulPrecision +def _set_float32_matmul_precision( + arg: str, +) -> None: ... # THPModule_setFloat32MatmulPrecision +def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... # THPModule_allowFP16ReductionCuBLAS +def _set_cublas_allow_fp16_reduced_precision_reduction( + arg: _bool, +) -> None: ... # THPModule_setAllowFP16ReductionCuBLAS +def _get_cublas_allow_bf16_reduced_precision_reduction() -> _bool: ... # THPModule_allowBF16ReductionCuBLAS +def _set_cublas_allow_bf16_reduced_precision_reduction( + arg: _bool, +) -> None: ... # THPModule_setAllowBF16ReductionCuBLAS +def _set_conj(x: Tensor, conj: _bool) -> None: ... +def _set_neg(x: Tensor, neg: _bool) -> None: ... +def _set_meta_in_tls_dispatch_include(meta_in_tls: _bool) -> None: ... +def _meta_in_tls_dispatch_include() -> _bool: ... +def _stash_obj_in_tls(key: str, arg: Any) -> None: ... +def _get_obj_in_tls(key: str) -> Any: ... +def _is_key_in_tls(key: str) -> _bool: ... +def _select_conv_backend(*args, **kwargs) -> ConvBackend: ... +def _conv_determine_backend_memory_format( + input: Tensor, + weight: Tensor, + backend: ConvBackend, +) -> memory_format: ... +def _has_storage(x: Tensor) -> _bool: ... +def _construct_storage_from_data_pointer(data_ptr: _int, device: torch.device, size: _int) -> Storage: ... +def _should_allow_numbers_as_tensors(func_name: str) -> _bool: ... 
+def _group_tensors_by_device_and_dtype(nested_tensorlists: List[List[Optional[Tensor]]], with_indices: _bool = False) -> Dict[Tuple[torch.device, str], Tuple[List[List[Optional[Tensor]]], List[_int]]]: ... + +# NB: There is no Capsule type in typing, see +# https://code.activestate.com/lists/python-dev/139675/ +def _to_dlpack(data: Tensor) -> Any: ... # THPModule_toDLPack +def _from_dlpack(data: Any) -> Tensor: ... # THPModule_fromDLPack +def _get_cpp_backtrace( + frames_to_skip: _int, + maximum_number_of_frames: _int, +) -> str: ... # THPModule_getCppBacktrace +def set_flush_denormal(arg: _bool) -> _bool: ... # THPModule_setFlushDenormal +def get_default_dtype() -> _dtype: ... # THPModule_getDefaultDtype +def _get_default_device() -> str: ... # THPModule_getDefaultDevice +def _get_qengine() -> _int: ... # THPModule_qEngine +def _set_qengine(qengine: _int) -> None: ... # THPModule_setQEngine +def _supported_qengines() -> List[_int]: ... # THPModule_supportedQEngines +def _is_xnnpack_enabled() -> _bool: ... # THPModule_isEnabledXNNPACK +def _check_sparse_tensor_invariants() -> _bool: ... # THPModule_checkSparseTensorInvariants +def _set_check_sparse_tensor_invariants( + arg: _bool, +) -> None: ... # THPModule_setCheckSparseTensorInvariants +def _set_default_mobile_cpu_allocator() -> None: ... # THPModule_setDefaultMobileCPUAllocator +def _unset_default_mobile_cpu_allocator() -> None: ... # THPModule_unsetDefaultMobileCPUAllocator +def _is_torch_function_enabled() -> _bool: ... # THPModule_isEnabledTorchFunction +def _has_torch_function( + args: Iterable[Any], +) -> _bool: ... # THPModule_has_torch_function +def _has_torch_function_unary(Any) -> _bool: ... # THPModule_has_torch_function_unary +def _has_torch_function_variadic( + *args: Any, +) -> _bool: ... # THPModule_has_torch_function_variadic +def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_nesting +def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting +def _log_api_usage_once(str) -> None: ... # LogAPIUsageOnceFromPython +def _log_api_usage_metadata(event: str, metadata_map: Dict[str, str]) -> None: ... # LogAPIUsageMetadataFromPython +def _demangle(str) -> str: ... # c10::demangle +def _disabled_torch_function_impl( + func: Callable, + types: Iterable[Type], + args: Tuple, + kwargs: Dict, +) -> Any: ... # THPModule_disable_torch_function +def _disabled_torch_dispatch_impl( + func: Callable, + types: Iterable[Type], + args: Tuple, + kwargs: Dict, +) -> Any: ... # THPModule_disable_dispatch_function +def _get_linalg_preferred_backend() -> torch._C._LinalgBackend: ... +def _set_linalg_preferred_backend(arg: torch._C._LinalgBackend): ... + +class _LinalgBackend: + Default: _LinalgBackend + Cusolver: _LinalgBackend + Magma: _LinalgBackend + +class ConvBackend(Enum): ... + +class Tag(Enum): + core: _int = 0 + data_dependent_output: _int = 1 + dynamic_output_shape: _int = 2 + generated: _int = 3 + inplace_view: _int = 4 + nondeterministic_bitwise: _int = 5 + nondeterministic_seeded: _int = 6 + pointwise: _int = 7 + pt2_compliant_tag: _int = 8 + view_copy: _int = 9 + +# Defined in `valgrind.h` and `callgrind.h` respectively. +def _valgrind_supported_platform() -> _bool: ... # NVALGRIND +def _valgrind_toggle() -> None: ... # CALLGRIND_TOGGLE_COLLECT +def _valgrind_toggle_and_dump_stats() -> None: ... 
# CALLGRIND_TOGGLE_COLLECT and CALLGRIND_DUMP_STATS + +has_openmp: _bool +has_mkl: _bool +_has_mps: _bool +has_lapack: _bool +_has_cuda: _bool +_has_mkldnn: _bool +_has_cudnn: _bool +has_spectral: _bool +_GLIBCXX_USE_CXX11_ABI: _bool +default_generator: Generator + +# Defined in torch/csrc/autograd/init.cpp +def _set_grad_enabled(enabled: _bool) -> None: ... +def is_grad_enabled() -> _bool: ... +def _set_fwd_grad_enabled(enabled: _bool) -> None: ... +def _is_fwd_grad_enabled() -> _bool: ... +def is_inference_mode_enabled() -> _bool: ... +def set_autocast_enabled(enabled: _bool) -> None: ... +def is_autocast_enabled() -> _bool: ... +def clear_autocast_cache() -> None: ... +def set_autocast_cpu_enabled(enabled: _bool) -> None: ... +def is_autocast_cpu_enabled() -> _bool: ... +def _is_any_autocast_enabled() -> _bool: ... +def set_autocast_cpu_dtype(dtype: _dtype) -> None: ... +def set_autocast_gpu_dtype(dtype: _dtype) -> None: ... +def get_autocast_cpu_dtype() -> _dtype: ... +def get_autocast_gpu_dtype() -> _dtype: ... +def autocast_increment_nesting() -> _int: ... +def autocast_decrement_nesting() -> _int: ... +def is_autocast_cache_enabled() -> _bool: ... +def set_autocast_cache_enabled(enabled: _bool) -> None: ... +def _increment_version(tensor: Tensor) -> None: ... +def set_anomaly_enabled(enabled: _bool, check_nan: _bool = True) -> None: ... +def is_anomaly_enabled() -> _bool: ... +def is_anomaly_check_nan_enabled() -> _bool: ... +def _is_multithreading_enabled() -> _bool: ... +def _set_multithreading_enabled(enabled: _bool) -> None: ... +def _set_view_replay_enabled(enabled: _bool) -> None: ... +def _is_view_replay_enabled() -> _bool: ... +def _enter_dual_level() -> _int: ... +def _exit_dual_level(level: _int) -> None: ... +def _make_dual(tensor: Tensor, tangent: Tensor, level: _int) -> Tensor: ... +def _unpack_dual(tensor: Tensor, level: _int) -> Tensor: ... +def __set_forward_AD_enabled(enabled: _bool) -> None: ... +def __is_forward_AD_enabled() -> _bool: ... +def _register_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ... +def _reset_default_hooks() -> None: ... +def _is_torch_function_mode_enabled() -> _bool: ... +def _set_torch_function_mode(cls: Any) -> None: ... +def _push_on_torch_function_stack(cls: Any) -> None: ... +def _pop_torch_function_stack() -> Any: ... +def _get_function_stack_at(idx: _int) -> Any: ... +def _len_torch_function_stack() -> _int: ... +def _set_torch_dispatch_mode(cls: Any) -> None: ... +def _push_on_torch_dispatch_stack(cls: Any) -> None: ... +def _pop_torch_dispatch_stack(mode_key: Optional[torch._C._TorchDispatchModeKey] = None) -> Any: ... +def _get_dispatch_mode(mode_key: Optional[torch._C._TorchDispatchModeKey]) -> Any: ... +def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Any: ... +def _set_dispatch_mode(mode: Any) -> None: ... +def _get_dispatch_stack_at(idx: _int) -> Any: ... +def _len_torch_dispatch_stack() -> _int: ... + +class _DisableTorchDispatch: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _EnableTorchFunction: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _EnablePythonDispatcher: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _DisablePythonDispatcher: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... 
+ +class _EnablePreDispatch: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _DisableFuncTorch: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _DisableAutocast: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _InferenceMode: + def __init__(self, enabled: _bool): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +def _set_autograd_fallback_mode(mode: str) -> None: ... +def _get_autograd_fallback_mode() -> str: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class LoggerBase: ... +class NoopLogger(LoggerBase): ... +class LockingLogger(LoggerBase): ... + +class AggregationType(Enum): + SUM = 0 + AVG = 1 + +class FileCheck: + def run(self, test_string: str) -> None: ... + def check(self, test_string: str) -> FileCheck: ... + def check_not(self, test_string: str) -> FileCheck: ... + def check_same(self, test_string: str) -> FileCheck: ... + def check_next(self, test_string: str) -> FileCheck: ... + def check_count( + self, + test_string: str, + count: _int, + exactly: _bool = False, + ) -> FileCheck: ... + def check_dag(self, test_string: str) -> FileCheck: ... + def check_source_highlighted(self, test_string: str) -> FileCheck: ... + def check_regex(self, test_string: str) -> FileCheck: ... + +# Defined in torch/csrc/jit/python/init.cpp +class PyTorchFileReader: + @overload + def __init__(self, name: str) -> None: ... + @overload + def __init__(self, buffer: BinaryIO) -> None: ... + def get_record(self, name: str) -> bytes: ... + def serialization_id(self) -> str: ... + +class PyTorchFileWriter: + @overload + def __init__(self, name: str) -> None: ... + @overload + def __init__(self, buffer: BinaryIO) -> None: ... + def write_record(self, name: str, data: Union[bytes, _int], size: _int) -> None: ... + def write_end_of_file(self) -> None: ... + def set_min_version(self, version: _int) -> None: ... + def get_all_written_records(self) -> List[str]: ... + def archive_name(self) -> str: ... + def serialization_id(self) -> str: ... + +def _jit_get_inline_everything_mode() -> _bool: ... +def _jit_set_inline_everything_mode(enabled: _bool) -> None: ... +def _jit_get_logging_option() -> str: ... +def _jit_set_logging_option(option: str) -> None: ... +def _jit_set_logging_stream(stream_name: str) -> None: ... +def _jit_pass_cse(Graph) -> _bool: ... +def _jit_pass_dce(Graph) -> None: ... +def _jit_pass_lint(Graph) -> None: ... + +# Defined in torch/csrc/jit/python/python_custom_class.cpp +def _get_custom_class_python_wrapper(name: str, attr: str) -> Any: ... + +# Defined in torch/csrc/Module.cpp +def _rename_privateuse1_backend(backend: str) -> None: ... +def _get_privateuse1_backend_name() -> str: ... + +# Defined in torch/csrc/Generator.cpp +class Generator: + device: _device + def __init__(self, device: Optional[DeviceLikeType] = None) -> None: ... + def get_state(self) -> Tensor: ... + def set_state(self, _new_state: Tensor) -> Generator: ... + def set_offset(self, offset: _int) -> Generator: ... + def get_offset(self) -> _int: ... + def manual_seed(self, seed: _int) -> Generator: ... + def seed(self) -> _int: ... + def initial_seed(self) -> _int: ... + +# Defined in torch/csrc/utils/python_dispatch.cpp + +class _DispatchOperatorHandle: + def schema(self) -> FunctionSchema: ... 
+ +class _DispatchModule: + def def_(self, schema: str, alias: str = "") -> _DispatchModule: ... + def def_legacy(self, schema: str) -> _DispatchModule: ... + def def_name_t_t( + self, + name: str, + dispatch: str, + debug: str = "default_def_name_t_t", + ) -> _DispatchModule: ... + def def_schema_t_t( + self, + schema: str, + dispatch: str, + alias: str, + debug: str = "default_def_schema_t_t", + ) -> _DispatchModule: ... + def impl_t_t( + self, + name: str, + dispatch: str, + debug: str = "impl_t_t", + ) -> _DispatchModule: ... + def impl(self, name: str, dispatch: str, func: Callable) -> _DispatchModule: ... + def define(self, schema: str, alias: str = "") -> _DispatchModule: ... + def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ... + +def _dispatch_library( + kind: str, + name: str, + dispatch: str, + file: str = "", + linenum: Any = 0, +) -> _DispatchModule: ... +def _dispatch_dump(name: str) -> str: ... +def _dispatch_dump_table(name: str) -> str: ... +def _dispatch_check_invariants(name: str) -> None: ... +def _dispatch_check_all_invariants() -> None: ... +def _dispatch_call_boxed(handle: _DispatchOperatorHandle, *args, **kwargs) -> Any: ... +def _dispatch_find_schema_or_throw(name: str, overload_name: str) -> _DispatchOperatorHandle: ... +def _dispatch_set_report_error_callback(handle: _DispatchOperatorHandle, callback: Callable) -> None: ... +def _dispatch_has_kernel(name: str) -> _bool: ... +def _dispatch_has_kernel_for_dispatch_key( + name: str, + dispatch: _dispatchkey, +) -> _bool: ... +def _dispatch_has_kernel_for_any_dispatch_key( + name: str, + dispatch_key_set: DispatchKeySet, +) -> _bool: ... +def _dispatch_has_computed_kernel_for_dispatch_key( + name: str, + dispatch: _dispatchkey, +) -> _bool: ... +def _dispatch_find_dangling_impls() -> List[str]: ... +def _dispatch_get_all_op_names() -> List[str]: ... +def _dispatch_tls_set_dispatch_key_excluded( + dispatch: _dispatchkey, + val: _bool, +) -> None: ... +def _dispatch_tls_is_dispatch_key_excluded(dispatch: _dispatchkey) -> _bool: ... +def _dispatch_tls_set_dispatch_key_included( + dispatch: _dispatchkey, + val: _bool, +) -> None: ... +def _dispatch_tls_is_dispatch_key_included(dispatch: _dispatchkey) -> _bool: ... +def _dispatch_isTensorSubclassLike(tensor: Tensor) -> _bool: ... +def _dispatch_key_name(dispatch: _dispatchkey) -> str: ... +def _dispatch_key_for_device(device_type: str) -> str: ... +def _parse_dispatch_key(key: str) -> Optional[DispatchKey]: ... +def _dispatch_key_parse(dispatch: _dispatchkey) -> DispatchKey: ... +def _dispatch_num_backends() -> _int: ... +def _dispatch_pystub(name: str, overload: str) -> Optional[Tuple[str, str]]: ... +def _dispatch_is_alias_key(dispatch: _dispatchkey) -> _bool: ... +def _functionality_to_backend_keys(dispatch: _dispatchkey) -> List[DispatchKey]: ... +def _functionalization_reapply_views_tls() -> _bool: ... + +class DispatchKey(Enum): + Undefined: DispatchKey = ... + FPGA: DispatchKey = ... + ORT: DispatchKey = ... + Vulkan: DispatchKey = ... + Metal: DispatchKey = ... + MKLDNN: DispatchKey = ... + OpenGL: DispatchKey = ... + OpenCL: DispatchKey = ... + IDEEP: DispatchKey = ... + CustomRNGKeyId: DispatchKey = ... + MkldnnCPU: DispatchKey = ... + Sparse: DispatchKey = ... + SparseCsrCPU: DispatchKey = ... + SparseCsrCUDA: DispatchKey = ... + NestedTensor: DispatchKey = ... + Dense: DispatchKey = ... + Python: DispatchKey = ... + FuncTorchDynamicLayerBackMode: DispatchKey = ... + ZeroTensor: DispatchKey = ... + Conjugate: DispatchKey = ... 
+ Negative: DispatchKey = ... + BackendSelect: DispatchKey = ... + Named: DispatchKey = ... + AutogradOther: DispatchKey = ... + AutogradFunctionality: DispatchKey = ... + AutogradNestedTensor: DispatchKey = ... + Tracer: DispatchKey = ... + Autocast: DispatchKey = ... + Batched: DispatchKey = ... + VmapMode: DispatchKey = ... + FuncTorchGradWrapper: DispatchKey = ... + FuncTorchBatched: DispatchKey = ... + BatchedNestedTensor: DispatchKey = ... + FuncTorchVmapMode: DispatchKey = ... + FuncTorchDynamicLayerFrontMode: DispatchKey = ... + Functionalize: DispatchKey = ... + TESTING_ONLY_GenericWrapper: DispatchKey = ... + TESTING_ONLY_GenericMode: DispatchKey = ... + ADInplaceOrView: DispatchKey = ... + Autograd: DispatchKey = ... + CompositeImplicitAutograd: DispatchKey = ... + CompositeImplicitAutogradNestedTensor: DispatchKey = ... + CompositeExplicitAutograd: DispatchKey = ... + CompositeExplicitAutogradNonFunctional: DispatchKey = ... + FuncTorchBatchedDecomposition: DispatchKey = ... + CPU: DispatchKey = ... + CUDA: DispatchKey = ... + HIP: DispatchKey = ... + XLA: DispatchKey = ... + MTIA: DispatchKey = ... + MPS: DispatchKey = ... + IPU: DispatchKey = ... + XPU: DispatchKey = ... + HPU: DispatchKey = ... + VE: DispatchKey = ... + Lazy: DispatchKey = ... + Meta: DispatchKey = ... + PrivateUse1: DispatchKey = ... + PrivateUse2: DispatchKey = ... + PrivateUse3: DispatchKey = ... + QuantizedCPU: DispatchKey = ... + QuantizedCUDA: DispatchKey = ... + QuantizedHIP: DispatchKey = ... + QuantizedXLA: DispatchKey = ... + QuantizedMTIA: DispatchKey = ... + QuantizedMPS: DispatchKey = ... + QuantizedIPU: DispatchKey = ... + QuantizedXPU: DispatchKey = ... + QuantizedHPU: DispatchKey = ... + QuantizedVE: DispatchKey = ... + QuantizedLazy: DispatchKey = ... + QuantizedMeta: DispatchKey = ... + QuantizedPrivateUse1: DispatchKey = ... + QuantizedPrivateUse2: DispatchKey = ... + QuantizedPrivateUse3: DispatchKey = ... + SparseCPU: DispatchKey = ... + SparseCUDA: DispatchKey = ... + SparseHIP: DispatchKey = ... + SparseXLA: DispatchKey = ... + SparseMTIA: DispatchKey = ... + SparseMPS: DispatchKey = ... + SparseIPU: DispatchKey = ... + SparseXPU: DispatchKey = ... + SparseHPU: DispatchKey = ... + SparseVE: DispatchKey = ... + SparseLazy: DispatchKey = ... + SparseMeta: DispatchKey = ... + SparsePrivateUse1: DispatchKey = ... + SparsePrivateUse2: DispatchKey = ... + SparsePrivateUse3: DispatchKey = ... + NestedTensorCPU: DispatchKey = ... + NestedTensorCUDA: DispatchKey = ... + NestedTensorHIP: DispatchKey = ... + NestedTensorXLA: DispatchKey = ... + NestedTensorMTIA: DispatchKey = ... + NestedTensorMPS: DispatchKey = ... + NestedTensorIPU: DispatchKey = ... + NestedTensorXPU: DispatchKey = ... + NestedTensorHPU: DispatchKey = ... + NestedTensorVE: DispatchKey = ... + NestedTensorLazy: DispatchKey = ... + NestedTensorMeta: DispatchKey = ... + NestedTensorPrivateUse1: DispatchKey = ... + NestedTensorPrivateUse2: DispatchKey = ... + NestedTensorPrivateUse3: DispatchKey = ... + AutogradCPU: DispatchKey = ... + AutogradCUDA: DispatchKey = ... + AutogradHIP: DispatchKey = ... + AutogradXLA: DispatchKey = ... + AutogradMTIA: DispatchKey = ... + AutogradMPS: DispatchKey = ... + AutogradIPU: DispatchKey = ... + AutogradXPU: DispatchKey = ... + AutogradHPU: DispatchKey = ... + AutogradVE: DispatchKey = ... + AutogradLazy: DispatchKey = ... + AutogradMeta: DispatchKey = ... + AutogradPrivateUse1: DispatchKey = ... + AutogradPrivateUse2: DispatchKey = ... + AutogradPrivateUse3: DispatchKey = ... 
+
+class DispatchKeySet:
+    def __init__(self, key: DispatchKey) -> None: ...
+    def __or__(self, other: DispatchKeySet) -> DispatchKeySet: ...
+    def __sub__(self, other: DispatchKeySet) -> DispatchKeySet: ...
+    def __and__(self, other: DispatchKeySet) -> DispatchKeySet: ...
+    def highestPriorityTypeId(self) -> DispatchKey: ...
+    def has(self, k: _dispatchkey) -> _bool: ...
+    def add(self, k: _dispatchkey) -> DispatchKeySet: ...
+    def remove(self, k: _dispatchkey) -> DispatchKeySet: ...
+    def __repr__(self) -> str: ...
+
+_dispatch_autogradother_backends: DispatchKeySet
+_additional_keys_to_prop_for_wrapper_tensors: DispatchKeySet
+
+def _dispatch_has_backend_fallback(dispatch: _dispatchkey) -> _bool: ...
+def _dispatch_keyset_full_after(t: _dispatchkey) -> DispatchKeySet: ...
+def _dispatch_keyset_full() -> DispatchKeySet: ...
+def _dispatch_keyset_to_string(keyset: DispatchKeySet) -> str: ...
+def _dispatch_get_backend_keyset_from_autograd(
+    dispatch: _dispatchkey,
+) -> DispatchKeySet: ...
+def _dispatch_keys(tensor: Tensor) -> DispatchKeySet: ...
+def _dispatch_tls_local_exclude_set() -> DispatchKeySet: ...
+def _dispatch_tls_local_include_set() -> DispatchKeySet: ...
+def _dispatch_is_included_in_alias(
+    dispatch_a: _dispatchkey,
+    dispatch_b: _dispatchkey,
+) -> _bool: ...
+def _propagate_xla_data(a: Tensor, b: Tensor) -> None: ...
+def _replace_(a: Tensor, b: Tensor) -> None: ...
+def _commit_update(a: Tensor) -> None: ...
+
+class _ExcludeDispatchKeyGuard:
+    def __init__(self, keyset: DispatchKeySet): ...
+    def __enter__(self): ...
+    def __exit__(self, exc_type, exc_value, traceback): ...
+
+class _IncludeDispatchKeyGuard:
+    def __init__(self, k: DispatchKey): ...
+    def __enter__(self): ...
+    def __exit__(self, exc_type, exc_value, traceback): ...
+
+class _ForceDispatchKeyGuard:
+    def __init__(self, include: DispatchKeySet, exclude: DispatchKeySet): ...
+    def __enter__(self): ...
+    def __exit__(self, exc_type, exc_value, traceback): ...
+
+class _AutoDispatchBelowAutograd:
+    def __init__(self): ...
+    def __enter__(self): ...
+    def __exit__(self, exc_type, exc_value, traceback): ...
+
+def _dispatch_print_registrations_for_dispatch_key(dispatch_key: str = "") -> None: ...
+def _dispatch_get_registrations_for_dispatch_key(
+    dispatch_key: str = "",
+) -> List[str]: ...
+def _are_functorch_transforms_active() -> _bool: ...
+
+# Defined in torch/csrc/autograd/init.cpp
+def _set_python_dispatcher(dispatcher: object) -> None: ...
+
+def _get_singleton_int(id: _int, coeff: _int) -> SymInt: ...
+
+def _get_constant_bool_symnode(val: _bool) -> Any: ...
+
+class _TorchDispatchModeKey(Enum):
+    FAKE: _TorchDispatchModeKey = ...
+    PROXY: _TorchDispatchModeKey = ...
+    FUNCTIONAL: _TorchDispatchModeKey = ...
+
+class _SetExcludeDispatchKeyGuard:
+    def __init__(self, k: DispatchKey, enabled: _bool): ...
+    def __enter__(self): ...
+    def __exit__(self, exc_type, exc_value, traceback): ...
+
+# Defined in torch/csrc/utils/init.cpp
+class BenchmarkConfig:
+    num_calling_threads: _int
+    num_worker_threads: _int
+    num_warmup_iters: _int
+    num_iters: _int
+    profiler_output_path: str
+
+class BenchmarkExecutionStats:
+    latency_avg_ms: _float
+    num_iters: _int
+
+class ThroughputBenchmark:
+    def __init__(self, module: Any) -> None: ...
+    def add_input(self, *args: Any, **kwargs: Any) -> None: ...
+    def run_once(self, *args: Any, **kwargs: Any) -> Any: ...
+    def benchmark(self, config: BenchmarkConfig) -> BenchmarkExecutionStats: ...
+
+# Defined in torch/csrc/Storage.cpp
+class StorageBase(object): ...
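+
+# Illustrative usage sketch (not part of the generated stub): rough shape of the
+# ThroughputBenchmark / BenchmarkConfig bindings declared above, assuming
+# BenchmarkConfig is default-constructible as the torch.utils.throughput_benchmark
+# wrapper uses it; numbers are arbitrary example values.
+#
+#   import torch
+#   bench = torch._C.ThroughputBenchmark(torch.nn.Linear(4, 4))
+#   bench.add_input(torch.randn(1, 4))
+#   config = torch._C.BenchmarkConfig()
+#   config.num_calling_threads = 1
+#   config.num_warmup_iters = 10
+#   config.num_iters = 100
+#   stats = bench.benchmark(config)        # -> BenchmarkExecutionStats
+#   stats.latency_avg_ms, stats.num_iters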
+ +# TODO: where +class DoubleTensor(Tensor): ... +class FloatTensor(Tensor): ... +class BFloat16Tensor(Tensor): ... +class LongTensor(Tensor): ... +class IntTensor(Tensor): ... +class ShortTensor(Tensor): ... +class HalfTensor(Tensor): ... +class CharTensor(Tensor): ... +class ByteTensor(Tensor): ... +class BoolTensor(Tensor): ... + +# Defined in torch/csrc/autograd/python_engine.cpp +class _ImperativeEngine: + def queue_callback(self, callback: Callable[[], None]) -> None: ... + def run_backward(self, *args: Any, **kwargs: Any) -> Tuple[Tensor, ...]: ... + def is_checkpoint_valid(self) -> _bool: ... + +# Defined in torch/csrc/autograd/python_variable.cpp +class _TensorMeta(type): ... + +# Defined in torch/csrc/autograd/python_variable.cpp +class TensorBase(metaclass=_TensorMeta): + requires_grad: _bool + retains_grad: _bool + shape: Size + data: Tensor + names: List[str] + device: _device + dtype: _dtype + layout: _layout + real: Tensor + imag: Tensor + T: Tensor + H: Tensor + mT: Tensor + mH: Tensor + ndim: _int + output_nr: _int + _version: _int + _base: Optional[Tensor] + _cdata: _int + grad_fn: Optional[_Node] + _grad_fn: Any + _grad: Optional[Tensor] + grad: Optional[Tensor] + _backward_hooks: Optional[Dict[_int, Callable[[Tensor], Optional[Tensor]]]] + nbytes: _int + itemsize: _int + _has_symbolic_sizes_strides: _bool + def __abs__(self) -> Tensor: ... + def __add__(self, other: Any) -> Tensor: ... + @overload + def __and__(self, other: Tensor) -> Tensor: ... + @overload + def __and__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __and__(self, other: Any) -> Tensor: ... + def __bool__(self) -> builtins.bool: ... + def __complex__(self) -> builtins.complex: ... + def __div__(self, other: Any) -> Tensor: ... + def __eq__(self, other: Any) -> Tensor: ... # type: ignore[override] + def __float__(self) -> builtins.float: ... + def __floordiv__(self, other: Any) -> Tensor: ... + def __ge__(self, other: Any) -> Tensor: ... + def __getitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]]) -> Tensor: ... + def __gt__(self, other: Any) -> Tensor: ... + def __iadd__(self, other: Any) -> Tensor: ... + @overload + def __iand__(self, other: Tensor) -> Tensor: ... + @overload + def __iand__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __iand__(self, other: Any) -> Tensor: ... + def __idiv__(self, other: Any) -> Tensor: ... + def __ifloordiv__(self, other: Any) -> Tensor: ... + @overload + def __ilshift__(self, other: Tensor) -> Tensor: ... + @overload + def __ilshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __ilshift__(self, other: Any) -> Tensor: ... + def __imod__(self, other: Any) -> Tensor: ... + def __imul__(self, other: Any) -> Tensor: ... + def __index__(self) -> builtins.int: ... + @overload + def __init__(self, *args: Any, device: Optional[DeviceLikeType] = None) -> None: ... + @overload + def __init__(self, storage: Storage) -> None: ... + @overload + def __init__(self, other: Tensor) -> None: ... + @overload + def __init__(self, size: _size, *, device: Optional[DeviceLikeType] = None) -> None: ... + def __int__(self) -> builtins.int: ... + def __invert__(self) -> Tensor: ... + @overload + def __ior__(self, other: Tensor) -> Tensor: ... 
+ @overload + def __ior__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __ior__(self, other: Any) -> Tensor: ... + @overload + def __irshift__(self, other: Tensor) -> Tensor: ... + @overload + def __irshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __irshift__(self, other: Any) -> Tensor: ... + def __isub__(self, other: Any) -> Tensor: ... + @overload + def __ixor__(self, other: Tensor) -> Tensor: ... + @overload + def __ixor__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __ixor__(self, other: Any) -> Tensor: ... + def __le__(self, other: Any) -> Tensor: ... + def __long__(self) -> builtins.int: ... + @overload + def __lshift__(self, other: Tensor) -> Tensor: ... + @overload + def __lshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __lshift__(self, other: Any) -> Tensor: ... + def __lt__(self, other: Any) -> Tensor: ... + def __matmul__(self, other: Any) -> Tensor: ... + def __mod__(self, other: Any) -> Tensor: ... + def __mul__(self, other: Any) -> Tensor: ... + def __ne__(self, other: Any) -> Tensor: ... # type: ignore[override] + def __neg__(self) -> Tensor: ... + def __new__(self, *args, **kwargs) -> Tensor: ... + def __nonzero__(self) -> builtins.bool: ... + @overload + def __or__(self, other: Tensor) -> Tensor: ... + @overload + def __or__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __or__(self, other: Any) -> Tensor: ... + def __pow__(self, other: Any) -> Tensor: ... + def __radd__(self, other: Any) -> Tensor: ... + def __rand__(self, other: Any) -> Tensor: ... + def __rfloordiv__(self, other: Any) -> Tensor: ... + def __rmul__(self, other: Any) -> Tensor: ... + def __ror__(self, other: Any) -> Tensor: ... + def __rpow__(self, other: Any) -> Tensor: ... + @overload + def __rshift__(self, other: Tensor) -> Tensor: ... + @overload + def __rshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __rshift__(self, other: Any) -> Tensor: ... + def __rsub__(self, other: Any) -> Tensor: ... + def __rtruediv__(self, other: Any) -> Tensor: ... + def __rxor__(self, other: Any) -> Tensor: ... + def __setitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]], val: Union[Tensor, Number]) -> None: ... + def __sub__(self, other: Any) -> Tensor: ... + def __truediv__(self, other: Any) -> Tensor: ... + @overload + def __xor__(self, other: Tensor) -> Tensor: ... + @overload + def __xor__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __xor__(self, other: Any) -> Tensor: ... + def _addmm_activation(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False) -> Tensor: ... + def _autocast_to_full_precision(self, cuda_enabled: _bool, cpu_enabled: _bool) -> Tensor: ... + def _autocast_to_reduced_precision(self, cuda_enabled: _bool, cpu_enabled: _bool, cuda_dtype: _dtype, cpu_dtype: _dtype) -> Tensor: ... + def _coalesced_(self, coalesced: _bool) -> Tensor: ... + def _conj(self) -> Tensor: ... + def _conj_physical(self) -> Tensor: ... + def _dimI(self) -> _int: ... + def _dimV(self) -> _int: ... + def _indices(self) -> Tensor: ... + def _is_all_true(self) -> Tensor: ... 
+ def _is_any_true(self) -> Tensor: ... + def _is_view(self) -> _bool: ... + def _is_zerotensor(self) -> _bool: ... + @staticmethod + def _make_subclass(cls: Type[S], data: Tensor, require_grad: _bool = False, dispatch_strides: _bool = False, dispatch_device: _bool = False, device_for_backend_keys: Optional[_device] = None) -> S: ... + def _neg_view(self) -> Tensor: ... + def _nested_tensor_size(self) -> Tensor: ... + def _nested_tensor_storage_offsets(self) -> Tensor: ... + def _nested_tensor_strides(self) -> Tensor: ... + def _nnz(self) -> _int: ... + def _sparse_mask_projection(self, mask: Tensor, accumulate_matches: _bool = False) -> Tensor: ... + def _to_dense(self, dtype: Optional[_dtype] = None, masked_grad: Optional[_bool] = None) -> Tensor: ... + @overload + def _to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor: ... + @overload + def _to_sparse(self, sparse_dim: _int) -> Tensor: ... + def _to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ... + def _to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ... + def _to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor: ... + def _to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor: ... + def _values(self) -> Tensor: ... + def abs(self) -> Tensor: ... + def abs_(self) -> Tensor: ... + def absolute(self) -> Tensor: ... + def absolute_(self) -> Tensor: ... + def acos(self) -> Tensor: ... + def acos_(self) -> Tensor: ... + def acosh(self) -> Tensor: ... + def acosh_(self) -> Tensor: ... + def add(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ... + def add_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number] = 1) -> Tensor: ... + def addbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def addbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def addcdiv(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: ... + def addcdiv_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: ... + def addcmul(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: ... + def addcmul_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: ... + def addmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def addmm_(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def addmv(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def addmv_(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def addr(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def addr_(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def adjoint(self) -> Tensor: ... + def align_as(self, other: Tensor) -> Tensor: ... 
+ @overload + def align_to(self, order: Sequence[Union[str, ellipsis, None]], ellipsis_idx: _int) -> Tensor: ... + @overload + def align_to(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... + @overload + def all(self) -> Tensor: ... + @overload + def all(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor: ... + @overload + def all(self, dim: _int, keepdim: _bool = False) -> Tensor: ... + @overload + def all(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor: ... + def allclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool: ... + def amax(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor: ... + def amin(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor: ... + def aminmax(self, *, dim: Optional[_int] = None, keepdim: _bool = False) -> torch.return_types.aminmax: ... + def angle(self) -> Tensor: ... + @overload + def any(self) -> Tensor: ... + @overload + def any(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor: ... + @overload + def any(self, dim: _int, keepdim: _bool = False) -> Tensor: ... + @overload + def any(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor: ... + def apply_(self, callable: Callable) -> Tensor: ... + def arccos(self) -> Tensor: ... + def arccos_(self) -> Tensor: ... + def arccosh(self) -> Tensor: ... + def arccosh_(self) -> Tensor: ... + def arcsin(self) -> Tensor: ... + def arcsin_(self) -> Tensor: ... + def arcsinh(self) -> Tensor: ... + def arcsinh_(self) -> Tensor: ... + def arctan(self) -> Tensor: ... + def arctan2(self, other: Tensor) -> Tensor: ... + def arctan2_(self, other: Tensor) -> Tensor: ... + def arctan_(self) -> Tensor: ... + def arctanh(self) -> Tensor: ... + def arctanh_(self) -> Tensor: ... + def argmax(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor: ... + def argmin(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor: ... + @overload + def argsort(self, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor: ... + @overload + def argsort(self, dim: _int = -1, descending: _bool = False) -> Tensor: ... + @overload + def argsort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor: ... + def argwhere(self) -> Tensor: ... + def as_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... + def as_strided_(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... + def as_strided_scatter(self, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... + def as_subclass(self, cls: Type[S]) -> S: ... + def asin(self) -> Tensor: ... + def asin_(self) -> Tensor: ... + def asinh(self) -> Tensor: ... + def asinh_(self) -> Tensor: ... + def atan(self) -> Tensor: ... + def atan2(self, other: Tensor) -> Tensor: ... + def atan2_(self, other: Tensor) -> Tensor: ... + def atan_(self) -> Tensor: ... + def atanh(self) -> Tensor: ... + def atanh_(self) -> Tensor: ... + def baddbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... 
+ def baddbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def bernoulli(self, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def bernoulli(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def bernoulli_(self, p: Tensor, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def bernoulli_(self, p: _float = 0.5, *, generator: Optional[Generator] = None) -> Tensor: ... + def bfloat16(self) -> Tensor: ... + def bincount(self, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor: ... + @overload + def bitwise_and(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_and(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_and_(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_and_(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_left_shift(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_left_shift(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_left_shift_(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_left_shift_(self, other: Union[Number, _complex]) -> Tensor: ... + def bitwise_not(self) -> Tensor: ... + def bitwise_not_(self) -> Tensor: ... + @overload + def bitwise_or(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_or(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_or_(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_or_(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_right_shift(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_right_shift(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_right_shift_(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_right_shift_(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_xor(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_xor(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def bitwise_xor_(self, other: Tensor) -> Tensor: ... + @overload + def bitwise_xor_(self, other: Union[Number, _complex]) -> Tensor: ... + def bmm(self, mat2: Tensor) -> Tensor: ... + def bool(self) -> Tensor: ... + @overload + def broadcast_to(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: ... + @overload + def broadcast_to(self, *size: _int) -> Tensor: ... + def byte(self) -> Tensor: ... + def cauchy_(self, median: _float = 0, sigma: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: ... + def ccol_indices(self) -> Tensor: ... + def ceil(self) -> Tensor: ... + def ceil_(self) -> Tensor: ... + def chalf(self, *, memory_format: Optional[memory_format] = None) -> Tensor: ... + def char(self) -> Tensor: ... + def cholesky(self, upper: _bool = False) -> Tensor: ... + def cholesky_inverse(self, upper: _bool = False) -> Tensor: ... + def cholesky_solve(self, input2: Tensor, upper: _bool = False) -> Tensor: ... + def chunk(self, chunks: _int, dim: _int = 0) -> List[Tensor]: ... + @overload + def clamp(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... + @overload + def clamp(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... + @overload + def clamp_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... 
+ @overload + def clamp_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... + @overload + def clamp_max(self, max: Tensor) -> Tensor: ... + @overload + def clamp_max(self, max: Union[Number, _complex]) -> Tensor: ... + @overload + def clamp_max_(self, max: Tensor) -> Tensor: ... + @overload + def clamp_max_(self, max: Union[Number, _complex]) -> Tensor: ... + @overload + def clamp_min(self, min: Tensor) -> Tensor: ... + @overload + def clamp_min(self, min: Union[Number, _complex]) -> Tensor: ... + @overload + def clamp_min_(self, min: Tensor) -> Tensor: ... + @overload + def clamp_min_(self, min: Union[Number, _complex]) -> Tensor: ... + @overload + def clip(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... + @overload + def clip(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... + @overload + def clip_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... + @overload + def clip_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... + def clone(self, *, memory_format: Optional[memory_format] = None) -> Tensor: ... + def coalesce(self) -> Tensor: ... + def col_indices(self) -> Tensor: ... + def conj(self) -> Tensor: ... + def conj_physical(self) -> Tensor: ... + def conj_physical_(self) -> Tensor: ... + def contiguous(self, memory_format=torch.contiguous_format) -> Tensor: ... + def copy_(self, src: Tensor, non_blocking: _bool = False) -> Tensor: ... + @overload + def copysign(self, other: Tensor) -> Tensor: ... + @overload + def copysign(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def copysign_(self, other: Tensor) -> Tensor: ... + @overload + def copysign_(self, other: Union[Number, _complex]) -> Tensor: ... + def corrcoef(self) -> Tensor: ... + def cos(self) -> Tensor: ... + def cos_(self) -> Tensor: ... + def cosh(self) -> Tensor: ... + def cosh_(self) -> Tensor: ... + @overload + def count_nonzero(self, dim: Optional[_int] = None) -> Tensor: ... + @overload + def count_nonzero(self, dim: _size) -> Tensor: ... + @overload + def count_nonzero(self, *dim: _int) -> Tensor: ... + def cov(self, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor: ... + def cpu(self) -> Tensor: ... + def cross(self, other: Tensor, dim: Optional[_int] = None) -> Tensor: ... + def crow_indices(self) -> Tensor: ... + def cuda(self, device: Optional[Union[_device, _int, str]] = None, non_blocking: _bool = False) -> Tensor: ... + @overload + def cummax(self, dim: _int) -> torch.return_types.cummax: ... + @overload + def cummax(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummax: ... + @overload + def cummin(self, dim: _int) -> torch.return_types.cummin: ... + @overload + def cummin(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummin: ... + @overload + def cumprod(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def cumprod(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def cumprod_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def cumprod_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def cumsum(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: ... 
+ @overload + def cumsum(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def cumsum_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def cumsum_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... + def data_ptr(self) -> _int: ... + def deg2rad(self) -> Tensor: ... + def deg2rad_(self) -> Tensor: ... + def dense_dim(self) -> _int: ... + def dequantize(self) -> Tensor: ... + def det(self) -> Tensor: ... + def detach(self) -> Tensor: ... + def detach_(self) -> Tensor: ... + def diag(self, diagonal: _int = 0) -> Tensor: ... + def diag_embed(self, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor: ... + def diagflat(self, offset: _int = 0) -> Tensor: ... + @overload + def diagonal(self, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor: ... + @overload + def diagonal(self, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: ... + def diagonal_scatter(self, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: ... + def diff(self, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None) -> Tensor: ... + def digamma(self) -> Tensor: ... + def digamma_(self) -> Tensor: ... + def dim(self) -> _int: ... + def dist(self, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ... + def div(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: ... + def div_(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: ... + @overload + def divide(self, other: Tensor) -> Tensor: ... + @overload + def divide(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: ... + @overload + def divide(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: ... + @overload + def divide(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def divide_(self, other: Tensor) -> Tensor: ... + @overload + def divide_(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: ... + @overload + def divide_(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: ... + @overload + def divide_(self, other: Union[Number, _complex]) -> Tensor: ... + def dot(self, tensor: Tensor) -> Tensor: ... + def double(self) -> Tensor: ... + @overload + def dsplit(self, sections: _int) -> List[Tensor]: ... + @overload + def dsplit(self, indices: _size) -> List[Tensor]: ... + @overload + def dsplit(self, *indices: _int) -> List[Tensor]: ... + def element_size(self) -> _int: ... + @overload + def eq(self, other: Tensor) -> Tensor: ... + @overload + def eq(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def eq_(self, other: Tensor) -> Tensor: ... + @overload + def eq_(self, other: Union[Number, _complex]) -> Tensor: ... + def equal(self, other: Tensor) -> _bool: ... + def erf(self) -> Tensor: ... + def erf_(self) -> Tensor: ... + def erfc(self) -> Tensor: ... + def erfc_(self) -> Tensor: ... + def erfinv(self) -> Tensor: ... + def erfinv_(self) -> Tensor: ... + def exp(self) -> Tensor: ... + def exp2(self) -> Tensor: ... + def exp2_(self) -> Tensor: ... + def exp_(self) -> Tensor: ... + @overload + def expand(self, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False) -> Tensor: ... + @overload + def expand(self, *size: _int, implicit: _bool = False) -> Tensor: ... 
+ def expand_as(self, other: Tensor) -> Tensor: ... + def expm1(self) -> Tensor: ... + def expm1_(self) -> Tensor: ... + def exponential_(self, lambd: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def fill_(self, value: Tensor) -> Tensor: ... + @overload + def fill_(self, value: Union[Number, _complex]) -> Tensor: ... + def fill_diagonal_(self, fill_value: Union[Number, _complex], wrap: _bool = False) -> Tensor: ... + def fix(self) -> Tensor: ... + def fix_(self) -> Tensor: ... + @overload + def flatten(self, start_dim: _int = 0, end_dim: _int = -1) -> Tensor: ... + @overload + def flatten(self, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ... + @overload + def flatten(self, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ... + @overload + def flatten(self, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ... + @overload + def flip(self, dims: _size) -> Tensor: ... + @overload + def flip(self, *dims: _int) -> Tensor: ... + def fliplr(self) -> Tensor: ... + def flipud(self) -> Tensor: ... + def float(self) -> Tensor: ... + @overload + def float_power(self, exponent: Tensor) -> Tensor: ... + @overload + def float_power(self, exponent: Union[Number, _complex]) -> Tensor: ... + @overload + def float_power_(self, exponent: Tensor) -> Tensor: ... + @overload + def float_power_(self, exponent: Union[Number, _complex]) -> Tensor: ... + def floor(self) -> Tensor: ... + def floor_(self) -> Tensor: ... + def floor_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor: ... + def floor_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: ... + def fmax(self, other: Tensor) -> Tensor: ... + def fmin(self, other: Tensor) -> Tensor: ... + @overload + def fmod(self, other: Tensor) -> Tensor: ... + @overload + def fmod(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def fmod_(self, other: Tensor) -> Tensor: ... + @overload + def fmod_(self, other: Union[Number, _complex]) -> Tensor: ... + def frac(self) -> Tensor: ... + def frac_(self) -> Tensor: ... + def frexp(self) -> torch.return_types.frexp: ... + @overload + def gather(self, dim: _int, index: Tensor, *, sparse_grad: _bool = False) -> Tensor: ... + @overload + def gather(self, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False) -> Tensor: ... + def gcd(self, other: Tensor) -> Tensor: ... + def gcd_(self, other: Tensor) -> Tensor: ... + @overload + def ge(self, other: Tensor) -> Tensor: ... + @overload + def ge(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def ge_(self, other: Tensor) -> Tensor: ... + @overload + def ge_(self, other: Union[Number, _complex]) -> Tensor: ... + def geometric_(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor: ... + def geqrf(self) -> torch.return_types.geqrf: ... + def ger(self, vec2: Tensor) -> Tensor: ... + def get_device(self) -> _int: ... + @overload + def greater(self, other: Tensor) -> Tensor: ... + @overload + def greater(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def greater_(self, other: Tensor) -> Tensor: ... + @overload + def greater_(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def greater_equal(self, other: Tensor) -> Tensor: ... 
+ @overload + def greater_equal(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def greater_equal_(self, other: Tensor) -> Tensor: ... + @overload + def greater_equal_(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def gt(self, other: Tensor) -> Tensor: ... + @overload + def gt(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def gt_(self, other: Tensor) -> Tensor: ... + @overload + def gt_(self, other: Union[Number, _complex]) -> Tensor: ... + def half(self) -> Tensor: ... + def hardshrink(self, lambd: Union[Number, _complex] = 0.5) -> Tensor: ... + def has_names(self) -> _bool: ... + def heaviside(self, values: Tensor) -> Tensor: ... + def heaviside_(self, values: Tensor) -> Tensor: ... + def histc(self, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0) -> Tensor: ... + @overload + def histogram(self, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram: ... + @overload + def histogram(self, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram: ... + @overload + def hsplit(self, sections: _int) -> List[Tensor]: ... + @overload + def hsplit(self, indices: _size) -> List[Tensor]: ... + @overload + def hsplit(self, *indices: _int) -> List[Tensor]: ... + def hypot(self, other: Tensor) -> Tensor: ... + def hypot_(self, other: Tensor) -> Tensor: ... + def i0(self) -> Tensor: ... + def i0_(self) -> Tensor: ... + def igamma(self, other: Tensor) -> Tensor: ... + def igamma_(self, other: Tensor) -> Tensor: ... + def igammac(self, other: Tensor) -> Tensor: ... + def igammac_(self, other: Tensor) -> Tensor: ... + @overload + def index_add(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def index_add(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... + def index_add_(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def index_copy(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: ... + @overload + def index_copy(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ... + @overload + def index_copy_(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: ... + @overload + def index_copy_(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ... + @overload + def index_fill(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: ... + @overload + def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ... + @overload + def index_fill(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ... + @overload + def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ... + @overload + def index_fill_(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: ... + @overload + def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ... + @overload + def index_fill_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ... + @overload + def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ... 
+ def index_put(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... + def index_put_(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... + def index_reduce(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: ... + def index_reduce_(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: ... + @overload + def index_select(self, dim: _int, index: Tensor) -> Tensor: ... + @overload + def index_select(self, dim: Union[str, ellipsis, None], index: Tensor) -> Tensor: ... + def indices(self) -> Tensor: ... + def inner(self, other: Tensor) -> Tensor: ... + def int(self) -> Tensor: ... + def int_repr(self) -> Tensor: ... + def inverse(self) -> Tensor: ... + def is_coalesced(self) -> _bool: ... + def is_complex(self) -> _bool: ... + def is_conj(self) -> _bool: ... + def is_contiguous(self, memory_format=torch.contiguous_format) -> _bool: ... + is_cpu: _bool + is_cuda: _bool + def is_distributed(self) -> _bool: ... + def is_floating_point(self) -> _bool: ... + def is_inference(self) -> _bool: ... + is_ipu: _bool + is_leaf: _bool + is_meta: _bool + is_mkldnn: _bool + is_mps: _bool + is_mtia: _bool + def is_neg(self) -> _bool: ... + is_nested: _bool + def is_nonzero(self) -> _bool: ... + is_ort: _bool + def is_pinned(self, device: Optional[Optional[DeviceLikeType]] = None) -> _bool: ... + is_quantized: _bool + def is_same_size(self, other: Tensor) -> _bool: ... + def is_set_to(self, tensor: Tensor) -> _bool: ... + def is_signed(self) -> _bool: ... + is_sparse: _bool + is_sparse_csr: _bool + is_vulkan: _bool + def isclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor: ... + def isfinite(self) -> Tensor: ... + def isinf(self) -> Tensor: ... + def isnan(self) -> Tensor: ... + def isneginf(self) -> Tensor: ... + def isposinf(self) -> Tensor: ... + def isreal(self) -> Tensor: ... + def istft(self, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ... + def item(self) -> Number: ... + def kron(self, other: Tensor) -> Tensor: ... + @overload + def kthvalue(self, k: _int, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.kthvalue: ... + @overload + def kthvalue(self, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.kthvalue: ... + def lcm(self, other: Tensor) -> Tensor: ... + def lcm_(self, other: Tensor) -> Tensor: ... + def ldexp(self, other: Tensor) -> Tensor: ... + def ldexp_(self, other: Tensor) -> Tensor: ... + @overload + def le(self, other: Tensor) -> Tensor: ... + @overload + def le(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def le_(self, other: Tensor) -> Tensor: ... + @overload + def le_(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def lerp(self, end: Tensor, weight: Tensor) -> Tensor: ... + @overload + def lerp(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor: ... + @overload + def lerp_(self, end: Tensor, weight: Tensor) -> Tensor: ... + @overload + def lerp_(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor: ... 
+ @overload + def less(self, other: Tensor) -> Tensor: ... + @overload + def less(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def less_(self, other: Tensor) -> Tensor: ... + @overload + def less_(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def less_equal(self, other: Tensor) -> Tensor: ... + @overload + def less_equal(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def less_equal_(self, other: Tensor) -> Tensor: ... + @overload + def less_equal_(self, other: Union[Number, _complex]) -> Tensor: ... + def lgamma(self) -> Tensor: ... + def lgamma_(self) -> Tensor: ... + def log(self) -> Tensor: ... + def log10(self) -> Tensor: ... + def log10_(self) -> Tensor: ... + def log1p(self) -> Tensor: ... + def log1p_(self) -> Tensor: ... + def log2(self) -> Tensor: ... + def log2_(self) -> Tensor: ... + def log_(self) -> Tensor: ... + def log_normal_(self, mean: _float = 1, std: _float = 2, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def log_softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def log_softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... + def logaddexp(self, other: Tensor) -> Tensor: ... + def logaddexp2(self, other: Tensor) -> Tensor: ... + @overload + def logcumsumexp(self, dim: _int) -> Tensor: ... + @overload + def logcumsumexp(self, dim: Union[str, ellipsis, None]) -> Tensor: ... + def logdet(self) -> Tensor: ... + def logical_and(self, other: Tensor) -> Tensor: ... + def logical_and_(self, other: Tensor) -> Tensor: ... + def logical_not(self) -> Tensor: ... + def logical_not_(self) -> Tensor: ... + def logical_or(self, other: Tensor) -> Tensor: ... + def logical_or_(self, other: Tensor) -> Tensor: ... + def logical_xor(self, other: Tensor) -> Tensor: ... + def logical_xor_(self, other: Tensor) -> Tensor: ... + def logit(self, eps: Optional[_float] = None) -> Tensor: ... + def logit_(self, eps: Optional[_float] = None) -> Tensor: ... + @overload + def logsumexp(self, dim: Union[_int, _size], keepdim: _bool = False) -> Tensor: ... + @overload + def logsumexp(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False) -> Tensor: ... + def long(self) -> Tensor: ... + @overload + def lt(self, other: Tensor) -> Tensor: ... + @overload + def lt(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def lt_(self, other: Tensor) -> Tensor: ... + @overload + def lt_(self, other: Union[Number, _complex]) -> Tensor: ... + def lu_solve(self, LU_data: Tensor, LU_pivots: Tensor) -> Tensor: ... + def map2_(self, x: Tensor, y: Tensor, callable: Callable) -> Tensor: ... + def map_(self, tensor: Tensor, callable: Callable) -> Tensor: ... + @overload + def masked_fill(self, mask: Tensor, value: Tensor) -> Tensor: ... + @overload + def masked_fill(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ... + @overload + def masked_fill_(self, mask: Tensor, value: Tensor) -> Tensor: ... + @overload + def masked_fill_(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ... + def masked_scatter(self, mask: Tensor, source: Tensor) -> Tensor: ... + def masked_scatter_(self, mask: Tensor, source: Tensor) -> Tensor: ... + def masked_select(self, mask: Tensor) -> Tensor: ... + def matmul(self, other: Tensor) -> Tensor: ... + def matrix_exp(self) -> Tensor: ... + def matrix_power(self, n: _int) -> Tensor: ... + @overload + def max(self) -> Tensor: ... 
+ @overload + def max(self, other: Tensor) -> Tensor: ... + @overload + def max(self, dim: _int, keepdim: _bool = False) -> torch.return_types.max: ... + @overload + def max(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.max: ... + def maximum(self, other: Tensor) -> Tensor: ... + @overload + def mean(self, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def mean(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def mean(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def median(self) -> Tensor: ... + @overload + def median(self, dim: _int, keepdim: _bool = False) -> torch.return_types.median: ... + @overload + def median(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.median: ... + @overload + def min(self) -> Tensor: ... + @overload + def min(self, other: Tensor) -> Tensor: ... + @overload + def min(self, dim: _int, keepdim: _bool = False) -> torch.return_types.min: ... + @overload + def min(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.min: ... + def minimum(self, other: Tensor) -> Tensor: ... + def mm(self, mat2: Tensor) -> Tensor: ... + @overload + def mode(self, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.mode: ... + @overload + def mode(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.mode: ... + @overload + def moveaxis(self, source: _int, destination: _int) -> Tensor: ... + @overload + def moveaxis(self, source: _size, destination: _size) -> Tensor: ... + @overload + def movedim(self, source: _int, destination: _int) -> Tensor: ... + @overload + def movedim(self, source: _size, destination: _size) -> Tensor: ... + def msort(self) -> Tensor: ... + def mul(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor: ... + def mul_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: ... + def multinomial(self, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def multiply(self, other: Tensor) -> Tensor: ... + @overload + def multiply(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def multiply_(self, other: Tensor) -> Tensor: ... + @overload + def multiply_(self, other: Union[Number, _complex]) -> Tensor: ... + def mv(self, vec: Tensor) -> Tensor: ... + def mvlgamma(self, p: _int) -> Tensor: ... + def mvlgamma_(self, p: _int) -> Tensor: ... + def nan_to_num(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ... + def nan_to_num_(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ... + def nanmean(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def nanmedian(self) -> Tensor: ... + @overload + def nanmedian(self, dim: _int, keepdim: _bool = False) -> torch.return_types.nanmedian: ... + @overload + def nanmedian(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.nanmedian: ... + @overload + def nanquantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: ... 
+ @overload + def nanquantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: ... + def nansum(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def narrow(self, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: ... + @overload + def narrow(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ... + def narrow_copy(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ... + def ndimension(self) -> _int: ... + @overload + def ne(self, other: Tensor) -> Tensor: ... + @overload + def ne(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def ne_(self, other: Tensor) -> Tensor: ... + @overload + def ne_(self, other: Union[Number, _complex]) -> Tensor: ... + def neg(self) -> Tensor: ... + def neg_(self) -> Tensor: ... + def negative(self) -> Tensor: ... + def negative_(self) -> Tensor: ... + def nelement(self) -> _int: ... + @overload + def new(self, *args: Any, device: Optional[DeviceLikeType] = None) -> Tensor: ... + @overload + def new(self, storage: Storage) -> Tensor: ... + @overload + def new(self, other: Tensor) -> Tensor: ... + @overload + def new(self, size: _size, *, device: Optional[DeviceLikeType] = None) -> Tensor: ... + @overload + def new_empty(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + @overload + def new_empty(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + def new_empty_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + def new_full(self, size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + @overload + def new_ones(self, size: _size, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... + @overload + def new_ones(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + @overload + def new_ones(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + def new_tensor(self, data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ... 
+ @overload + def new_zeros(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + @overload + def new_zeros(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... + def nextafter(self, other: Tensor) -> Tensor: ... + def nextafter_(self, other: Tensor) -> Tensor: ... + @overload + def nonzero(self, *, as_tuple: Literal[False] = False) -> Tensor: ... + @overload + def nonzero(self, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ... + def nonzero_static(self, *, size: _int, fill_value: _int = -1) -> Tensor: ... + def normal_(self, mean: _float = 0, std: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def not_equal(self, other: Tensor) -> Tensor: ... + @overload + def not_equal(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def not_equal_(self, other: Tensor) -> Tensor: ... + @overload + def not_equal_(self, other: Union[Number, _complex]) -> Tensor: ... + def numel(self) -> _int: ... + def numpy(self, *, force: _bool = False) -> Any: ... + def orgqr(self, input2: Tensor) -> Tensor: ... + def ormqr(self, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False) -> Tensor: ... + def outer(self, vec2: Tensor) -> Tensor: ... + @overload + def permute(self, dims: _size) -> Tensor: ... + @overload + def permute(self, *dims: _int) -> Tensor: ... + def pin_memory(self, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: ... + def pinverse(self, rcond: _float = 1e-15) -> Tensor: ... + def polygamma(self, n: _int) -> Tensor: ... + def polygamma_(self, n: _int) -> Tensor: ... + def positive(self) -> Tensor: ... + @overload + def pow(self, exponent: Tensor) -> Tensor: ... + @overload + def pow(self, exponent: Union[Number, _complex]) -> Tensor: ... + @overload + def pow_(self, exponent: Tensor) -> Tensor: ... + @overload + def pow_(self, exponent: Union[Number, _complex]) -> Tensor: ... + def prelu(self, weight: Tensor) -> Tensor: ... + @overload + def prod(self, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def prod(self, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def prod(self, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + def put(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ... + def put_(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ... + def q_per_channel_axis(self) -> _int: ... + def q_per_channel_scales(self) -> Tensor: ... + def q_per_channel_zero_points(self) -> Tensor: ... + def q_scale(self) -> _float: ... + def q_zero_point(self) -> _int: ... + def qr(self, some: _bool = True) -> torch.return_types.qr: ... + def qscheme(self) -> _qscheme: ... + @overload + def quantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: ... + @overload + def quantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: ... + def rad2deg(self) -> Tensor: ... + def rad2deg_(self) -> Tensor: ... 
+ @overload + def random_(self, *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def random_(self, from_: _int, to: Optional[_int], *, generator: Optional[Generator] = None) -> Tensor: ... + @overload + def random_(self, to: _int, *, generator: Optional[Generator] = None) -> Tensor: ... + def ravel(self) -> Tensor: ... + def reciprocal(self) -> Tensor: ... + def reciprocal_(self) -> Tensor: ... + def record_stream(self, s: Stream) -> None: ... + def refine_names(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... + def relu(self) -> Tensor: ... + def relu_(self) -> Tensor: ... + @overload + def remainder(self, other: Tensor) -> Tensor: ... + @overload + def remainder(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def remainder_(self, other: Tensor) -> Tensor: ... + @overload + def remainder_(self, other: Union[Number, _complex]) -> Tensor: ... + def rename(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ... + def rename_(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ... + def renorm(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor: ... + def renorm_(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor: ... + @overload + def repeat(self, repeats: Sequence[Union[_int, SymInt]]) -> Tensor: ... + @overload + def repeat(self, *repeats: _int) -> Tensor: ... + @overload + def repeat_interleave(self, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ... + @overload + def repeat_interleave(self, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ... + def requires_grad_(self, mode: _bool = True) -> Tensor: ... + @overload + def reshape(self, shape: Sequence[Union[_int, SymInt]]) -> Tensor: ... + @overload + def reshape(self, *shape: _int) -> Tensor: ... + def reshape_as(self, other: Tensor) -> Tensor: ... + @overload + def resize_(self, size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None) -> Tensor: ... + @overload + def resize_(self, *size: _int, memory_format: Optional[memory_format] = None) -> Tensor: ... + def resize_as_(self, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ... + def resize_as_sparse_(self, the_template: Tensor) -> Tensor: ... + def resolve_conj(self) -> Tensor: ... + def resolve_neg(self) -> Tensor: ... + def retain_grad(self) -> None: ... + def roll(self, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor: ... + def rot90(self, k: _int = 1, dims: _size = (0,1)) -> Tensor: ... + @overload + def round(self) -> Tensor: ... + @overload + def round(self, *, decimals: _int) -> Tensor: ... + @overload + def round_(self) -> Tensor: ... + @overload + def round_(self, *, decimals: _int) -> Tensor: ... + def row_indices(self) -> Tensor: ... + def rsqrt(self) -> Tensor: ... + def rsqrt_(self) -> Tensor: ... + @overload + def scatter(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... + @overload + def scatter(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: ... + @overload + def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor: ... + @overload + def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ... 
+ @overload + def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ... + @overload + def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ... + @overload + def scatter_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... + @overload + def scatter_(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: ... + @overload + def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor: ... + @overload + def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ... + @overload + def scatter_add(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... + @overload + def scatter_add(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ... + def scatter_add_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ... + def scatter_reduce(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: ... + def scatter_reduce_(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: ... + @overload + def select(self, dim: _int, index: Union[_int, SymInt]) -> Tensor: ... + @overload + def select(self, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ... + def select_scatter(self, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ... + @overload + def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage], offset: _int, size: _size, stride: _size) -> Tensor: ... + @overload + def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage]) -> Tensor: ... + def sgn(self) -> Tensor: ... + def sgn_(self) -> Tensor: ... + def short(self) -> Tensor: ... + def sigmoid(self) -> Tensor: ... + def sigmoid_(self) -> Tensor: ... + def sign(self) -> Tensor: ... + def sign_(self) -> Tensor: ... + def signbit(self) -> Tensor: ... + def sin(self) -> Tensor: ... + def sin_(self) -> Tensor: ... + def sinc(self) -> Tensor: ... + def sinc_(self) -> Tensor: ... + def sinh(self) -> Tensor: ... + def sinh_(self) -> Tensor: ... + @overload + def size(self, dim: None = None) -> Size: ... + @overload + def size(self, dim: _int) -> _int: ... + def slice_scatter(self, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ... + def slogdet(self) -> torch.return_types.slogdet: ... + def smm(self, mat2: Tensor) -> Tensor: ... + @overload + def softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def sort(self, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False) -> torch.return_types.sort: ... + @overload + def sort(self, dim: _int = -1, descending: _bool = False) -> torch.return_types.sort: ... + @overload + def sort(self, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort: ... + @overload + def sort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort: ... + def sparse_dim(self) -> _int: ... + def sparse_mask(self, mask: Tensor) -> Tensor: ... + def sparse_resize_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: ... + def sparse_resize_and_clear_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: ... 
+ @overload + def split(self, split_size: _int, dim: _int = 0) -> Sequence[Tensor]: ... + @overload + def split(self, split_size: Tuple[_int, ...], dim: _int = 0) -> Sequence[Tensor]: ... + def split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ... + def sqrt(self) -> Tensor: ... + def sqrt_(self) -> Tensor: ... + def square(self) -> Tensor: ... + def square_(self) -> Tensor: ... + @overload + def squeeze(self) -> Tensor: ... + @overload + def squeeze(self, dim: _int) -> Tensor: ... + @overload + def squeeze(self, dim: _size) -> Tensor: ... + @overload + def squeeze(self, *dim: _int) -> Tensor: ... + @overload + def squeeze(self, dim: Union[str, ellipsis, None]) -> Tensor: ... + @overload + def squeeze_(self) -> Tensor: ... + @overload + def squeeze_(self, dim: _int) -> Tensor: ... + @overload + def squeeze_(self, dim: _size) -> Tensor: ... + @overload + def squeeze_(self, *dim: _int) -> Tensor: ... + @overload + def squeeze_(self, dim: Union[str, ellipsis, None]) -> Tensor: ... + def sspaddmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def std(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: ... + @overload + def std(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: ... + @overload + def std(self, unbiased: _bool = True) -> Tensor: ... + @overload + def std(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: ... + @overload + def std(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: ... + def untyped_storage(self) -> UntypedStorage: ... + def storage_offset(self) -> _int: ... + def storage_type(self) -> Storage: ... + @overload + def stride(self, dim: None = None) -> Tuple[_int, ...]: ... + @overload + def stride(self, dim: _int) -> _int: ... + def sub(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ... + def sub_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number] = 1) -> Tensor: ... + @overload + def subtract(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def subtract(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def subtract_(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def subtract_(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... + @overload + def sum(self, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def sum(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def sum(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def sum_to_size(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: ... + @overload + def sum_to_size(self, *size: _int) -> Tensor: ... + def svd(self, some: _bool = True, compute_uv: _bool = True) -> torch.return_types.svd: ... + def swapaxes(self, axis0: _int, axis1: _int) -> Tensor: ... + def swapaxes_(self, axis0: _int, axis1: _int) -> Tensor: ... 
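The std (and var) overloads above expose both the legacy unbiased flag and the keyword-only correction parameter. A short hedged comparison, using only the public Tensor API:

import torch

t = torch.randn(4, 8)

a = t.std(dim=1, unbiased=True, keepdim=True)   # legacy flag: divisor N - 1
b = t.std(dim=1, correction=1, keepdim=True)    # equivalent spelling via correction
c = t.std(dim=1, correction=0)                  # population std: divisor N
assert torch.allclose(a, b)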
+ def swapdims(self, dim0: _int, dim1: _int) -> Tensor: ... + def swapdims_(self, dim0: _int, dim1: _int) -> Tensor: ... + def t(self) -> Tensor: ... + def t_(self) -> Tensor: ... + def take(self, index: Tensor) -> Tensor: ... + def take_along_dim(self, indices: Tensor, dim: Optional[_int] = None) -> Tensor: ... + def tan(self) -> Tensor: ... + def tan_(self) -> Tensor: ... + def tanh(self) -> Tensor: ... + def tanh_(self) -> Tensor: ... + @overload + def tensor_split(self, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ... + @overload + def tensor_split(self, tensor_indices_or_sections: Tensor, dim: _int = 0) -> List[Tensor]: ... + @overload + def tensor_split(self, sections: Union[_int, SymInt], dim: _int = 0) -> List[Tensor]: ... + @overload + def tile(self, dims: Sequence[Union[_int, SymInt]]) -> Tensor: ... + @overload + def tile(self, *dims: _int) -> Tensor: ... + @overload + def to(self, dtype: _dtype, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor: ... + @overload + def to(self, device: Optional[DeviceLikeType] = None, dtype: Optional[_dtype] = None, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor: ... + @overload + def to(self, other: Tensor, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor: ... + def to_dense(self, dtype: Optional[_dtype] = None, *, masked_grad: Optional[_bool] = None) -> Tensor: ... + def to_mkldnn(self, dtype: Optional[_dtype] = None) -> Tensor: ... + def to_padded_tensor(self, padding: _float, output_size: Optional[Sequence[Union[_int, SymInt]]] = None) -> Tensor: ... + @overload + def to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor: ... + @overload + def to_sparse(self, sparse_dim: _int) -> Tensor: ... + def to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ... + def to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ... + def to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor: ... + def to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor: ... + def tolist(self) -> List: ... + def topk(self, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True) -> torch.return_types.topk: ... + def trace(self) -> Tensor: ... + @overload + def transpose(self, dim0: _int, dim1: _int) -> Tensor: ... + @overload + def transpose(self, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ... + def transpose_(self, dim0: _int, dim1: _int) -> Tensor: ... + def triangular_solve(self, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False) -> torch.return_types.triangular_solve: ... + def tril(self, diagonal: _int = 0) -> Tensor: ... + def tril_(self, diagonal: _int = 0) -> Tensor: ... + def triu(self, diagonal: _int = 0) -> Tensor: ... + def triu_(self, diagonal: _int = 0) -> Tensor: ... + def true_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor: ... + def true_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: ... + def trunc(self) -> Tensor: ... + def trunc_(self) -> Tensor: ... 
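The three to(...) overloads above cover dtype-only, device-plus-dtype, and tensor-template conversion. An illustrative sketch (CPU-only, nothing beyond the public API):

import torch

x = torch.arange(6, dtype=torch.int64)

a = x.to(torch.float32)              # dtype overload
b = x.to("cpu", torch.float64)       # device + dtype overload
template = torch.zeros(1, dtype=torch.float16)
c = x.to(template)                   # "other" overload: adopts template's dtype/device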
+ @overload + def type(self, dtype: None = None, non_blocking: _bool = False) -> str: ... + @overload + def type(self, dtype: Union[str, _dtype], non_blocking: _bool = False) -> Tensor: ... + def type_as(self, other: Tensor) -> Tensor: ... + @overload + def unbind(self, dim: _int = 0) -> List[Tensor]: ... + @overload + def unbind(self, dim: Union[str, ellipsis, None]) -> List[Tensor]: ... + @overload + def unflatten(self, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... + @overload + def unflatten(self, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: ... + def unfold(self, dimension: _int, size: _int, step: _int) -> Tensor: ... + def uniform_(self, from_: _float = 0, to: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: ... + def unsafe_chunk(self, chunks: _int, dim: _int = 0) -> List[Tensor]: ... + def unsafe_split(self, split_size: Union[_int, SymInt], dim: _int = 0) -> List[Tensor]: ... + def unsafe_split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ... + def unsqueeze(self, dim: _int) -> Tensor: ... + def unsqueeze_(self, dim: _int) -> Tensor: ... + def values(self) -> Tensor: ... + @overload + def var(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: ... + @overload + def var(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: ... + @overload + def var(self, unbiased: _bool = True) -> Tensor: ... + @overload + def var(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: ... + @overload + def var(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: ... + def vdot(self, other: Tensor) -> Tensor: ... + @overload + def view(self, dtype: _dtype) -> Tensor: ... + @overload + def view(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: ... + @overload + def view(self, *size: _int) -> Tensor: ... + def view_as(self, other: Tensor) -> Tensor: ... + @overload + def vsplit(self, sections: _int) -> List[Tensor]: ... + @overload + def vsplit(self, indices: _size) -> List[Tensor]: ... + @overload + def vsplit(self, *indices: _int) -> List[Tensor]: ... + @overload + def where(self, condition: Tensor, other: Tensor) -> Tensor: ... + @overload + def where(self, condition: Tensor, other: Union[Number, _complex]) -> Tensor: ... + @overload + def xlogy(self, other: Tensor) -> Tensor: ... + @overload + def xlogy(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def xlogy_(self, other: Tensor) -> Tensor: ... + @overload + def xlogy_(self, other: Union[Number, _complex]) -> Tensor: ... + def zero_(self) -> Tensor: ... + +_TensorBase = TensorBase + +# Defined in torch/csrc/multiprocessing/init.cpp +def _multiprocessing_init() -> None: ... + +# Defined in torch/csrc/mps/Module.cpp +def _mps_deviceSynchronize() -> None: ... +def _mps_get_default_generator() -> Generator: ... +def _mps_emptyCache() -> None: ... +def _mps_setMemoryFraction(fraction: _float) -> None: ... +def _mps_currentAllocatedMemory() -> _int: ... +def _mps_driverAllocatedMemory() -> _int: ... +def _mps_is_available() -> _bool: ... +def _mps_is_on_macos_13_or_newer(minor: _int) -> _bool: ... +def _mps_profilerStartTrace(mode: str, wait_until_completed: _bool) -> None: ... 
+def _mps_profilerStopTrace() -> None: ... +def _mps_acquireEvent(enable_timing: _bool) -> _int: ... +def _mps_releaseEvent(event_id: _int) -> None: ... +def _mps_recordEvent(event_id: _int) -> None: ... +def _mps_waitForEvent(event_id: _int) -> None: ... +def _mps_synchronizeEvent(event_id: _int) -> None: ... +def _mps_queryEvent(event_id: _int) -> _bool: ... +def _mps_elapsedTimeOfEvents(start_event_id: _int, end_event_id: _int) -> _float: ... + + +# Defined in torch/csrc/cuda/Module.cpp +def _cuda_getCurrentStream(device: _int) -> Tuple: ... +def _cuda_getCurrentRawStream(device: _int) -> _int: ... +def _cuda_getDefaultStream(device: _int) -> Tuple: ... +def _cuda_getCurrentBlasHandle() -> _int: ... +def _cuda_clearCublasWorkspaces() -> None: ... +def _cuda_setDevice(device: _int) -> None: ... +def _cuda_exchangeDevice(device: _int) -> _int: ... +def _cuda_maybeExchangeDevice(device: _int) -> _int: ... +def _cuda_getDevice() -> _int: ... +def _cuda_getDeviceCount() -> _int: ... +def _cuda_set_sync_debug_mode(warn_level: Union[_int, str]) -> None: ... +def _cuda_get_sync_debug_mode() -> _int: ... +def _cuda_sleep(cycles: _int) -> None: ... +def _cuda_synchronize() -> None: ... +def _cuda_ipc_collect() -> None: ... +def _cuda_getArchFlags() -> Optional[str]: ... +def _cuda_init() -> None: ... +def _cuda_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ... +def _cuda_getCompiledVersion() -> _int: ... +def _cuda_cudaHostAllocator() -> _int: ... +def _cuda_cudaCachingAllocator_raw_alloc(size: _int, cuda_stream: _int) -> _int: ... +def _cuda_cudaCachingAllocator_raw_delete(ptr: _int) -> None: ... +def _cuda_cudaCachingAllocator_set_allocator_settings(env: str) -> None: ... +def _cuda_beginAllocateCurrentStreamToPool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ... +def _cuda_endAllocateCurrentStreamToPool(device: _int) -> None: ... +def _cuda_releasePool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ... +def _cuda_checkPoolLiveAllocations(device: _int, mempool_id: Tuple[_int, _int], expected_live_allocations: Set) -> _bool: ... +def _cuda_setCheckpointPoolState(device: _int, state: _cuda_CUDAAllocator_AllocatorState, stale_storages: List[_int], storages_to_add_deleters_to: List[_int]) -> None: ... +def _cuda_setMemoryFraction(fraction: _float, device: _int) -> None: ... +def _cuda_emptyCache() -> None: ... +def _cuda_memoryStats(device: _int) -> Dict[str, Any]: ... +def _cuda_resetAccumulatedMemoryStats(device: _int) -> None: ... +def _cuda_resetPeakMemoryStats(device: _int) -> None: ... +def _cuda_memorySnapshot() -> Dict[str, Any]: ... +def _cuda_record_memory_history_legacy( + enabled: _bool, + record_context: _bool, + record_context_cpp: _bool, + alloc_trace_max_entries: _int, + alloc_trace_record_context: _bool, +) -> None: ... +def _cuda_record_memory_history( + enabled: Optional[str], + context: Optional[str], + stacks: str, + max_entries +) -> None: ... +def _cuda_isHistoryEnabled() -> _bool: ... + +def _cuda_getAllocatorBackend() -> str: ... +class _cuda_CUDAAllocator_AllocatorState: + pass +def _cuda_getCheckpointState(device: _int, mempool: Tuple[_int, _int]) -> _cuda_CUDAAllocator_AllocatorState: ... +def _set_cached_tensors_enabled(enabled: _bool) -> None: ... +def _add_cached_tensor(t: Tensor) -> None: ... +def _remove_cached_tensor(t: Tensor) -> None: ... +def _construct_CUDA_Tensor_From_Storage_And_Metadata(metadata: dict, storage: Storage) -> Tensor: ... +def _storage_Use_Count(storage_ptr: _int) -> _int: ... 
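The _cuda_memoryStats / _cuda_emptyCache family above backs the public torch.cuda memory utilities. A guarded sketch of the public wrappers (the stats key name follows the documented memory_stats layout and is an assumption about the running build):

import torch

if torch.cuda.is_available():
    x = torch.empty(1024, 1024, device="cuda")
    stats = torch.cuda.memory_stats()            # backed by _cuda_memoryStats
    peak = stats["allocated_bytes.all.peak"]     # documented stats key (assumed present)
    torch.cuda.reset_peak_memory_stats()         # backed by _cuda_resetPeakMemoryStats
    torch.cuda.empty_cache()                     # backed by _cuda_emptyCache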
+def _set_storage_access_error_msg(t: Tensor, s: str) -> None: ... +def _free_And_Remove_DeleterFn(storage_ptr: _int) -> None: ... +def _has_Standard_Deleter(storage_ptr: _int) -> _bool: ... + +class _cuda_CUDAAllocator: ... + +def _cuda_customAllocator(alloc_fn: _int, free_fn: _int) -> _cuda_CUDAAllocator: ... +def _cuda_changeCurrentAllocator(allocator: _cuda_CUDAAllocator) -> None: ... +def _cuda_getAllocator() -> _cuda_CUDAAllocator: ... +def _cuda_lock_mutex() -> None: ... +def _cuda_unlock_mutex() -> None: ... +def _cuda_canDeviceAccessPeer(device: _int, peer_device: _int) -> _bool: ... +def _cuda_jiterator_compile_and_launch_kernel( + code_string: str, + kernel_name: str, + return_by_ref: _bool, + num_outputs: _int, + tensors: Tuple, + kwargs: Dict[str, Union[_int, _float, _bool]], +) -> Tensor: ... +def _cuda_get_cudnn_benchmark_limit() -> _int: ... +def _cuda_set_cudnn_benchmark_limit(arg: _int) -> None: ... +def _cuda_get_conv_benchmark_empty_cache() -> _bool: ... +def _cudnn_set_conv_benchmark_empty_cache(enable: _bool) -> None: ... +def _nccl_version() -> _int: ... +def _nccl_version_suffix() -> bytes : ... +def _nccl_unique_id() -> bytes: ... +def _nccl_init_rank(nranks: _int, comm_id: bytes, rank: _int) -> object: ... +def _nccl_reduce( + input: Sequence[Tensor], + output: Tensor, + root: _int, + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_all_reduce( + input: Sequence[Tensor], + output: Sequence[Tensor], + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_broadcast( + input: Sequence[Tensor], + root: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_all_gather( + input: Sequence[Tensor], + output: Sequence[Tensor], + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_reduce_scatter( + input: Sequence[Tensor], + output: Sequence[Tensor], + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _rocm_is_backward_pass() -> _bool: ... + +class _CudaDeviceProperties: + name: str + major: _int + minor: _int + multi_processor_count: _int + total_memory: _int + is_integrated: _int + is_multi_gpu_board: _int + max_threads_per_multi_processor: _int + gcnArchName: str + +# Functions related to SDPA +class _SDPAParams: + query: Tensor + key: Tensor + value: Tensor + attn_mask: Optional[Tensor] + dropout: _float + is_causal: _bool + def __init__( + self, + query: Tensor, + key: Tensor, + value: Tensor, + attn_mask: Optional[Tensor], + dropout: _float, + is_causal: _bool) -> None: ... + +class _SDPBackend(Enum): + ERROR = -1 + MATH = 0 + FLASH_ATTENTION = 1 + EFFICIENT_ATTENTION = 2 + +def _can_use_flash_attention(params: _SDPAParams, debug: _bool) -> _bool: ... +def _can_use_mem_efficient_attention(params: _SDPAParams, debug: _bool) -> _bool: ... + +# Defined in torch/csrc/cuda/python_comm.cpp +def _broadcast(tensor: Tensor, devices: List[_int]) -> List[Tensor]: ... +def _broadcast_out(tensor: Tensor, out_tensors: List[Tensor]) -> List[Tensor]: ... +def _broadcast_coalesced( + tensors: List[Tensor], + devices: List[_int], + buffer_size: _int, +) -> List[List[Tensor]]: ... +def _scatter( + tensor: Tensor, + devices: List[_int], + chunk_sizes: Optional[List[_int]], + dim: _int, + streams: Optional[List[Stream]], +) -> List[Tensor]: ... 
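_CudaDeviceProperties above is the record returned by torch.cuda.get_device_properties; a small guarded usage sketch:

import torch

if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)   # a _CudaDeviceProperties instance
    print(props.name, props.major, props.minor)
    print(props.total_memory // (1024 ** 2), "MiB total")
    print(props.multi_processor_count, "SMs")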
+def _scatter_out( + tensor: Tensor, + out_tensors: List[Tensor], + dim: _int, + streams: Optional[List[Stream]], +) -> List[Tensor]: ... +def _gather( + tensors: List[Tensor], + dim: _int, + destination_index: Optional[_int], +) -> Tensor: ... +def _gather_out(tensors: List[Tensor], out_tensor: Tensor, dim: _int) -> Tensor: ... + +# Defined in torch/csrc/cuda/Stream.cpp +class _CudaStreamBase(Stream): + stream_id: _int + device_index: _int + device_type: _int + + device: _device + cuda_stream: _int + priority: _int + + def __new__( + self, + priority: _int = 0, + stream_id: _int = 0, + device_index: _int = 0, + stream_ptr: _int = 0, + ) -> _CudaStreamBase: ... + def query(self) -> _bool: ... + def synchronize(self) -> None: ... + def priority_range(self) -> Tuple[_int, _int]: ... + +# Defined in torch/csrc/cuda/Event.cpp +class _CudaEventBase: + device: _device + cuda_event: _int + + def __new__( + cls, + enable_timing: _bool = False, + blocking: _bool = False, + interprocess: _bool = False, + ) -> _CudaEventBase: ... + @classmethod + def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> _CudaEventBase: ... + def record(self, stream: _CudaStreamBase) -> None: ... + def wait(self, stream: _CudaStreamBase) -> None: ... + def query(self) -> _bool: ... + def elapsed_time(self, other: _CudaEventBase) -> _float: ... + def synchronize(self) -> None: ... + def ipc_handle(self) -> bytes: ... + +# Defined in torch/csrc/cuda/Graph.cpp +class _CUDAGraph: + def capture_begin(self, pool: Optional[Tuple[_int, _int]] = ..., capture_error_mode: str = "global") -> None: ... + def capture_end(self) -> None: ... + def replay(self) -> None: ... + def reset(self) -> None: ... + def pool(self) -> Tuple[_int, _int]: ... + def enable_debug_mode(self) -> None: ... + def debug_dump(self, debug_path: str) -> None: ... + +def _cuda_isCurrentStreamCapturing() -> _bool: ... +def _graph_pool_handle() -> Tuple[_int, _int]: ... + +# Defined in torch/csrc/DataLoader.cpp +def _set_worker_signal_handlers( + *arg: Any, +) -> None: ... # THPModule_setWorkerSignalHandlers +def _set_worker_pids( + key: _int, + child_pids: Tuple[_int, ...], +) -> None: ... # THPModule_setWorkerPIDs +def _remove_worker_pids(loader_id: _int) -> None: ... # THPModule_removeWorkerPIDs +def _error_if_any_worker_fails() -> None: ... # THPModule_errorIfAnyWorkerFails + +# Defined in torch/csrc/jit/python/python_tracer.cpp +class TracingState: + def push_scope(self, scope_name: str) -> None: ... + def pop_scope(self) -> None: ... + def current_scope(self) -> str: ... + def set_graph(self, graph: Graph) -> None: ... + def graph(self) -> Graph: ... + +def _create_graph_by_tracing( + func: Callable[..., Any], + inputs: Any, + var_name_lookup_fn: Callable[[Tensor], str], + strict: Any, + force_outplace: Any, + self: Any = None, + argument_names: List[str] = [], +) -> Tuple[Graph, Stack]: ... +def _tracer_warn_use_python(): ... +def _get_tracing_state() -> TracingState: ... + +# Defined in torch/csrc/jit/python/python_ir.cpp +# Not actually defined in python_ir.cpp, not sure where they are. +class IValue: ... + +Stack = List[IValue] + +class JitType: + annotation_str: str + def isSubtypeOf(self, other: JitType) -> _bool: ... + def with_dtype(self, dtype: _dtype) -> JitType: ... + def with_sizes(self, sizes: List[Optional[_int]]) -> JitType: ... + def kind(self) -> str: ... + def scalarType(self) -> Optional[str]: ... + def getElementType(self) -> JitType: ... + def dtype(self) -> Optional[_dtype]: ... 
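_CudaStreamBase and _CudaEventBase above are the C++ bases behind torch.cuda.Stream and torch.cuda.Event. A guarded, illustrative timing sketch using the public wrappers:

import torch

if torch.cuda.is_available():
    s = torch.cuda.Stream()                          # wraps _CudaStreamBase
    start = torch.cuda.Event(enable_timing=True)     # wraps _CudaEventBase
    end = torch.cuda.Event(enable_timing=True)
    with torch.cuda.stream(s):
        start.record()
        a = torch.randn(1024, 1024, device="cuda")
        b = a @ a
        end.record()
    end.synchronize()
    print(start.elapsed_time(end), "ms")             # _CudaEventBase.elapsed_time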
+ +class InferredType: + def __init__(self, arg: Union[JitType, str]): ... + def type(self) -> JitType: ... + def success(self) -> _bool: ... + def reason(self) -> str: ... + +R = TypeVar("R", bound=JitType) + +class AnyType(JitType): + @staticmethod + def get() -> AnyType: ... + +class NoneType(JitType): + @staticmethod + def get() -> NoneType: ... + +class BoolType(JitType): + @staticmethod + def get() -> BoolType: ... + +class FloatType(JitType): + @staticmethod + def get() -> FloatType: ... + +class ComplexType(JitType): + @staticmethod + def get() -> ComplexType: ... + +class IntType(JitType): + @staticmethod + def get() -> IntType: ... + +class SymIntType(JitType): + @staticmethod + def get() -> SymIntType: ... + +class SymBoolType(JitType): + @staticmethod + def get() -> SymBoolType: ... + +class NumberType(JitType): + @staticmethod + def get() -> NumberType: ... + +class StringType(JitType): + @staticmethod + def get() -> StringType: ... + +class DeviceObjType(JitType): + @staticmethod + def get() -> DeviceObjType: ... + +class _GeneratorType(JitType): + @staticmethod + def get() -> _GeneratorType: ... + +class StreamObjType(JitType): + @staticmethod + def get() -> StreamObjType: ... + +class ListType(JitType): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + @staticmethod + def ofInts() -> ListType: ... + @staticmethod + def ofTensors() -> ListType: ... + @staticmethod + def ofFloats() -> ListType: ... + @staticmethod + def ofComplexDoubles() -> ListType: ... + @staticmethod + def ofBools() -> ListType: ... + @staticmethod + def ofStrings() -> ListType: ... + +class DictType(JitType): + def __init__(self, key: JitType, value: JitType) -> None: ... + def getKeyType(self) -> JitType: ... + def getValueType(self) -> JitType: ... + +class TupleType(JitType): + def __init__(self, a: List[Optional[JitType]]) -> None: ... + def elements(self) -> List[JitType]: ... + +class UnionType(JitType): + def __init__(self, a: List[JitType]) -> None: ... + +class ClassType(JitType): + def __init__(self, qualified_name: str) -> None: ... + +class InterfaceType(JitType): + def __init__(self, qualified_name: str) -> None: ... + def getMethod(self, name: str) -> Optional[FunctionSchema]: ... + def getMethodNames(self) -> List[str]: ... + +class OptionalType(JitType, Generic[R]): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + @staticmethod + def ofTensor() -> OptionalType: ... + +class FutureType(JitType): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + +class AwaitType(JitType): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + +class RRefType(JitType): + def __init__(self, a: JitType) -> None: ... + +class EnumType(JitType): + def __init__( + self, + qualified_name: str, + value_type: JitType, + enum_names_values: List[Any], + ) -> None: ... + +class TensorType(JitType): + @classmethod + def get(cls) -> TensorType: ... + @classmethod + def getInferred(cls) -> TensorType: ... + def with_sizes(self, other: Optional[List[Optional[_int]]]) -> TensorType: ... + def sizes(self) -> Optional[List[_int]]: ... + def varyingSizes(self) -> Optional[List[Optional[_int]]]: ... + def strides(self) -> Optional[List[_int]]: ... + def device(self) -> Optional[_device]: ... + def dim(self) -> _int: ... + def dtype(self) -> Optional[_dtype]: ... + @staticmethod + def create_from_tensor(t: Tensor) -> TensorType: ... 
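These JIT type bindings are reachable through torch._C and are what the fake-tensor code later in this patch queries (e.g. TensorType.get()). A brief hedged sketch:

import torch

tensor_type = torch._C.TensorType.get()      # the general Tensor JIT type
int_list = torch._C.ListType.ofInts()        # JIT type for List[int]
print(int_list.getElementType().kind())      # kind string of the element type
print(tensor_type.isSubtypeOf(torch._C.AnyType.get()))   # True: everything <: Any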
+ +# Defined in torch/csrc/jit/python/python_tree_views.cpp +class SourceRange: ... +class TreeView: ... + +class Ident(TreeView): + @property + def name(self) -> str: ... + +class ClassDef(TreeView): ... + +class Def(TreeView): + def name(self) -> Ident: ... + +class Decl(TreeView): ... + +# Defined in torch/csrc/distributed/rpc/init.cpp +def _rpc_init() -> _bool: ... + +# Defined in torch/csrc/distributed/autograd/init.cpp +def _dist_autograd_init() -> _bool: ... + +# Defined in torch/csrc/distributed/c10d/init.cpp +def _c10d_init() -> _bool: ... + +# Defined in torch/csrc/distributed/rpc/testing/init.cpp +def _faulty_agent_init() -> _bool: ... +def _register_py_class_for_device(device: str, cls: Any) -> None: ... +def _activate_cuda_trace() -> None: ... + +# Defined in torch/csrc/Module.cpp +def _current_graph_task_id() -> _int: ... +def _current_autograd_node() -> _Node: ... + +# Defined in torch/csrc/Exceptions.cpp +class _OutOfMemoryError(RuntimeError): ... +class _DistError(RuntimeError): ... +class _DistBackendError(RuntimeError): ... +class _DistStoreError(RuntimeError): ... +class _DistNetworkError(RuntimeError): ... + +# Defined in torch/csrc/profiler/init.cpp +class CapturedTraceback: + pass +def gather_traceback(python: _bool, script: _bool, cpp: _bool) -> CapturedTraceback: ... +def symbolize_tracebacks(tracebacks: List[CapturedTraceback]) -> List[Dict[str, Any]]: ... + +def _load_mobile_module_from_file(filename: str): ... +def _load_mobile_module_from_bytes(bytes_: bytes): ... +def _load_jit_module_from_file(filename: str): ... +def _load_jit_module_from_bytes(bytes_: bytes): ... +def _save_mobile_module(m: LiteScriptModule, filename: str): ... +def _save_jit_module(m: ScriptModule, filename: str, extra_files: Dict[str, Any]): ... +def _save_mobile_module_to_bytes(m: LiteScriptModule) -> bytes: ... +def _save_jit_module_to_bytes(m: ScriptModule, extra_files: Dict[str, Any]) -> bytes: ... +def _get_module_info_from_flatbuffer(data: bytes): ... +def _jit_resolve_packet(op_name: str, *args, **kwargs) -> str: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_cudnn.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_cudnn.pyi new file mode 100644 index 0000000000000000000000000000000000000000..689c984b9d7de1ca98329495223dcb0a13a54f4e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_cudnn.pyi @@ -0,0 +1,17 @@ +from enum import Enum + +from torch.types import _bool, Tuple + +# Defined in torch/csrc/cuda/shared/cudnn.cpp +is_cuda: _bool + +def getRuntimeVersion() -> Tuple[int, int, int]: ... +def getCompileVersion() -> Tuple[int, int, int]: ... +def getVersionInt() -> int: ... + +class RNNMode(int, Enum): + value: int + rnn_relu = ... + rnn_tanh = ... + lstm = ... + gru = ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7909e0b8e33c6a6e3ee72f2bbbac40b914ddce93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi @@ -0,0 +1,188 @@ +# mypy: disable-error-code="type-arg" +from datetime import timedelta +from typing import Any, Dict, Generic, List, Optional, overload, Tuple, Type, TypeVar + +import torch + +from . 
import Future +from ._autograd import ProfilerEvent +from ._distributed_c10d import Store +from ._profiler import ProfilerConfig + +# This module is defined in torch/csrc/distributed/rpc/init.cpp + +_DEFAULT_INIT_METHOD: str +_DEFAULT_NUM_WORKER_THREADS: int +_UNSET_RPC_TIMEOUT: float +_DEFAULT_RPC_TIMEOUT_SEC: float + +_T = TypeVar("_T") + +class RpcBackendOptions: + rpc_timeout: float + init_method: str + def __init__( + self, + rpc_timeout: float = ..., + init_method: str = ..., + ): ... + +class WorkerInfo: + def __init__(self, name: str, worker_id: int): ... + @property + def name(self) -> str: ... + @property + def id(self) -> int: ... + def __eq__(self, other: object) -> bool: ... + +class RpcAgent: + def join(self, shutdown: bool = False, timeout: float = 0): ... + def sync(self): ... + def shutdown(self): ... + @overload + def get_worker_info(self) -> WorkerInfo: ... + @overload + def get_worker_info(self, workerName: str) -> WorkerInfo: ... + def get_worker_infos(self) -> List[WorkerInfo]: ... + def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ... + def get_debug_info(self) -> Dict[str, str]: ... + def get_metrics(self) -> Dict[str, str]: ... + +class PyRRef(Generic[_T]): + def __init__(self, value: _T, type_hint: Any = None) -> None: ... + def is_owner(self) -> bool: ... + def confirmed_by_owner(self) -> bool: ... + def owner(self) -> WorkerInfo: ... + def owner_name(self) -> str: ... + def to_here(self, timeout: float = ...) -> _T: ... + def local_value(self) -> Any: ... + def rpc_sync(self, timeout: float = ...) -> Any: ... + def rpc_async(self, timeout: float = ...) -> Any: ... + def remote(self, timeout: float = ...) -> Any: ... + def _serialize(self) -> Tuple: ... + @staticmethod + def _deserialize(tp: Tuple) -> PyRRef: ... + def _get_type(self) -> Type[_T]: ... + def _get_future(self) -> Future[_T]: ... + def _get_profiling_future(self) -> Future[_T]: ... + def _set_profiling_future(self, profilingFuture: Future[_T]): ... + +class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions): + num_worker_threads: int + device_maps: Dict[str, Dict[torch.device, torch.device]] + devices: List[torch.device] + def __init__( + self, + num_worker_threads: int, + _transports: Optional[List], + _channels: Optional[List], + rpc_timeout: float = ..., + init_method: str = ..., + device_maps: Dict[str, Dict[torch.device, torch.device]] = {}, # noqa: B006 + devices: List[torch.device] = [], # noqa: B006 + ): ... + def _set_device_map( + self, + to: str, + device_map: Dict[torch.device, torch.device], + ): ... + +class TensorPipeAgent(RpcAgent): + def __init__( + self, + store: Store, + name: str, + worker_id: int, + world_size: Optional[int], + opts: _TensorPipeRpcBackendOptionsBase, + reverse_device_maps: Dict[str, Dict[torch.device, torch.device]], + devices: List[torch.device], + ): ... + def join(self, shutdown: bool = False, timeout: float = 0): ... + def shutdown(self): ... + @overload + def get_worker_info(self) -> WorkerInfo: ... + @overload + def get_worker_info(self, workerName: str) -> WorkerInfo: ... + @overload + def get_worker_info(self, id: int) -> WorkerInfo: ... + def get_worker_infos(self) -> List[WorkerInfo]: ... + def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ... + def _update_group_membership( + self, + worker_info: WorkerInfo, + my_devices: List[torch.device], + reverse_device_map: Dict[str, Dict[torch.device, torch.device]], + is_join: bool, + ): ... 
+ def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ... + @property + def is_static_group(self) -> bool: ... + @property + def store(self) -> Store: ... + +def _is_current_rpc_agent_set() -> bool: ... +def _get_current_rpc_agent() -> RpcAgent: ... +def _set_and_start_rpc_agent(agent: RpcAgent): ... +def _reset_current_rpc_agent(): ... +def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ... +def _destroy_rref_context(ignoreRRefLeak: bool): ... +def _rref_context_get_debug_info() -> Dict[str, str]: ... +def _cleanup_python_rpc_handler(): ... +def _invoke_rpc_builtin( + dst: WorkerInfo, + opName: str, + rpcTimeoutSeconds: float, + *args: Any, + **kwargs: Any, +): ... +def _invoke_rpc_python_udf( + dst: WorkerInfo, + pickledPythonUDF: str, + tensors: List[torch.Tensor], + rpcTimeoutSeconds: float, + isAsyncExecution: bool, +): ... +def _invoke_rpc_torchscript( + dstWorkerName: str, + qualifiedNameStr: str, + argsTuple: Tuple, + kwargsDict: Dict, + rpcTimeoutSeconds: float, + isAsyncExecution: bool, +): ... +def _invoke_remote_builtin( + dst: WorkerInfo, + opName: str, + rpcTimeoutSeconds: float, + *args: Any, + **kwargs: Any, +): ... +def _invoke_remote_python_udf( + dst: WorkerInfo, + pickledPythonUDF: str, + tensors: List[torch.Tensor], + rpcTimeoutSeconds: float, + isAsyncExecution: bool, +): ... +def _invoke_remote_torchscript( + dstWorkerName: WorkerInfo, + qualifiedNameStr: str, + rpcTimeoutSeconds: float, + isAsyncExecution: bool, + *args: Any, + **kwargs: Any, +): ... +def get_rpc_timeout() -> float: ... +def enable_gil_profiling(flag: bool): ... +def _set_rpc_timeout(rpcTimeoutSeconds: float): ... + +class RemoteProfilerManager: + @staticmethod + def set_current_profiling_key(key: str): ... + +def _enable_server_process_global_profiler(new_config: ProfilerConfig): ... +def _disable_server_process_global_profiler() -> List[List[List[ProfilerEvent]]]: ... +def _set_profiler_node_id(default_node_id: int): ... +def _enable_jit_rref_pickle(): ... +def _disable_jit_rref_pickle(): ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_functions.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_functions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..151c1077b1a368e63f5b6f5d3ccd4254c2269c36 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_functions.pyi @@ -0,0 +1,11 @@ +from typing import AnyStr, List + +from torch import Tensor + +class UndefinedGrad: + def __init__(self) -> None: ... + def __call__(self, *inputs: Tensor) -> List[Tensor]: ... + +class DelayedError: + def __init__(self, msg: AnyStr, num_inputs: int) -> None: ... + def __call__(self, inputs: List[Tensor]) -> List[Tensor]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_nn.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_nn.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ec6da55327e8beaae6a52d48f4e94ae2f23e0cfb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_nn.pyi @@ -0,0 +1,86 @@ +# mypy: disable-error-code="type-arg" +from typing import List, Optional, overload, Sequence, Tuple, Union + +from torch import memory_format, Tensor +from torch.types import _bool, _device, _dtype, _int, _size + +# Defined in tools/autograd/templates/python_nn_functions.cpp + +def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ... 
+def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ... +def avg_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ... +def avg_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ... +def elu_(input: Tensor, alpha: float = ...) -> Tensor: ... +def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ... +def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ... +def gelu(input: Tensor, approximate: str = ...) -> Tensor: ... +def hardsigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ... +def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ... +def leaky_relu(input: Tensor, negative_slope: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ... +def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ... +def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ... +def log_sigmoid(input: Tensor) -> Tensor: ... +def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ... +def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: Optional[float] = None) -> Tensor: ... +def scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None) -> Tensor: ... +def softplus(input: Tensor, beta: int = ..., threshold: int = ...) -> Tensor: ... +def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ... + +# Defined in aten/src/ATen/native/mkldnn/Linear.cpp +def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ... + +# Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp +def mkldnn_reorder_conv2d_weight( + self: Tensor, + padding: List, + stride: List, + dilatation: List, + groups: int, +) -> Tensor: ... +def mkldnn_reorder_conv3d_weight( + self: Tensor, + padding: List, + stride: List, + dilatation: List, + groups: int, +) -> Tensor: ... + +# Defined in aten/src/ATen/native/mkldnn/Prelu.cpp +def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ... + +# Defined at tools/autograd/templates/python_nn_functions.cpp +@overload +def _parse_to( + device: _device, + dtype: _dtype, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... +@overload +def _parse_to( + dtype: _dtype, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... +@overload +def _parse_to( + tensor: Tensor, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... 
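The scaled_dot_product_attention stub above is surfaced publicly as torch.nn.functional.scaled_dot_product_attention; a minimal illustrative call with causal masking:

import torch
import torch.nn.functional as F

q = torch.randn(2, 4, 16, 64)   # (batch, heads, seq, head_dim)
k = torch.randn(2, 4, 16, 64)
v = torch.randn(2, 4, 16, 64)

out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)
print(out.shape)                # torch.Size([2, 4, 16, 64])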
+ +# Defined in aten/src/ATen/native/PadSequence.cpp +def pad_sequence( + sequences: List[Tensor], + batch_first: bool = False, + padding_value: float = ..., +) -> Tensor: ... +def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ... +def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_nvtx.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_nvtx.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f7ff779d8ad73cfb6189bb5c0c02127dc96988af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_nvtx.pyi @@ -0,0 +1,6 @@ +# Defined in torch/csrc/cuda/shared/nvtx.cpp +def rangePushA(message: str) -> int: ... +def rangePop() -> int: ... +def rangeStartA(message: str) -> int: ... +def rangeEnd(int) -> None: ... +def markA(message: str) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_onnx.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_onnx.pyi new file mode 100644 index 0000000000000000000000000000000000000000..376f461c358813fe6cc47aff7ff8946db57511fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_onnx.pyi @@ -0,0 +1,38 @@ +# Defined in torch/csrc/onnx/init.cpp + +from enum import Enum + +_CAFFE2_ATEN_FALLBACK: bool +PRODUCER_VERSION: str + +class TensorProtoDataType(Enum): + UNDEFINED = ... + FLOAT = ... + UINT8 = ... + INT8 = ... + UINT16 = ... + INT16 = ... + INT32 = ... + INT64 = ... + STRING = ... + BOOL = ... + FLOAT16 = ... + DOUBLE = ... + UINT32 = ... + UINT64 = ... + COMPLEX64 = ... + COMPLEX128 = ... + BFLOAT16 = ... + FLOAT8E5M2 = ... + FLOAT8E4M3FN = ... + +class OperatorExportTypes(Enum): + ONNX = ... + ONNX_ATEN = ... + ONNX_ATEN_FALLBACK = ... + ONNX_FALLTHROUGH = ... + +class TrainingMode(Enum): + EVAL = ... + PRESERVE = ... + TRAINING = ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C/_verbose.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_C/_verbose.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2388ce2bb8a5edd4c7640c374537e71607e5b72e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_C/_verbose.pyi @@ -0,0 +1,3 @@ +# Defined in torch/csrc/utils/verbose.cpp +def mkl_set_verbose(enable: int) -> int: ... +def mkldnn_set_verbose(level: int) -> int: ... 
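pad_sequence above backs torch.nn.utils.rnn.pad_sequence; a short illustrative call padding variable-length 1-D sequences into one batch:

import torch
from torch.nn.utils.rnn import pad_sequence

seqs = [torch.ones(3), torch.ones(5), torch.ones(2)]
padded = pad_sequence(seqs, batch_first=True, padding_value=0.0)
print(padded.shape)             # torch.Size([3, 5])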
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc7a7fed7512d4fb37b2a85fd41b862c65b57a03 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__init__.py @@ -0,0 +1,18 @@ +import torch + +from torch._subclasses.fake_tensor import ( + DynamicOutputShapeException, + FakeTensor, + FakeTensorMode, + UnsupportedFakeTensorException, +) + +from torch._subclasses.fake_utils import CrossRefFakeMode + +__all__ = [ + "FakeTensor", + "FakeTensorMode", + "UnsupportedFakeTensorException", + "DynamicOutputShapeException", + "CrossRefFakeMode", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..1938b114bdce905312aa92951c84444a463b4658 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py @@ -0,0 +1,1991 @@ +import contextlib +import functools +import itertools +import logging +import os +import sys +import traceback +import weakref +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union +from weakref import ReferenceType + +import torch +import torch._custom_op +import torch._logging + +from torch._guards import Source +from torch._ops import OpOverload +from torch._prims_common import ( + elementwise_dtypes, + ELEMENTWISE_TYPE_PROMOTION_KIND, + is_boolean_dtype, + is_float_dtype, + is_integer_dtype, +) +from torch._subclasses.meta_utils import MetaConverter +from torch._utils import render_call +from torch.fx.operator_schemas import normalize_function +from torch.multiprocessing.reductions import StorageWeakRef +from torch.overrides import TorchFunctionMode +from torch.utils._mode_utils import no_dispatch +from torch.utils._python_dispatch import ( + is_traceable_wrapper_subclass, + TorchDispatchMode, +) + +from torch.utils._pytree import PyTree, tree_map +from torch.utils._stats import count, count_label +from torch.utils.weak import WeakIdRef + +DimList = List + +log = logging.getLogger(__name__) +not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented") + +pytree = torch.utils._pytree +T = TypeVar("T") +TensorWeakRef = Any + +aten = torch._ops.ops.aten + +CONSTANT_NUMEL_LIMIT = 1 + +RECURSION_COUNT = 0 + + +# Small helper that increments recursion count, and +# resets it when the object goes out of scope. Useful +# if you don't want to increase indentation which is +# what a context manager would do. 
+class IncrementRecursionCount: + def __init__(self): + global RECURSION_COUNT + RECURSION_COUNT += 1 + + def __del__(self): + global RECURSION_COUNT + RECURSION_COUNT -= 1 + + +@dataclass +class UnsupportedFakeTensorException(RuntimeError): + reason: str + + +@dataclass +class DynamicOutputShapeException(RuntimeError): + func: OpOverload + + +@dataclass +class DataDependentOutputException(RuntimeError): + func: OpOverload + + +@dataclass +class UnsupportedOperatorException(RuntimeError): + func: OpOverload + + +_device_not_kwarg_ops = ( + aten._resize_output_.default, + aten._nested_tensor_from_tensor_list.default, + aten._nested_tensor_from_tensor_list.out, + aten.pin_memory.default, + aten.is_pinned.default, + aten.to.device, + aten.to.prim_Device, + aten._pin_memory.default, + aten._pin_memory.out, + aten._resize_output.default, + aten._resize_output.out, +) + +# this op is never actually used +_non_kwarg_device_constructors = (aten._list_to_tensor,) + + +# This function indicates if the backend device +# supports non-contiguous tensors +def is_noncontiguous_supported(device): + if device.type == "hpu": + return False + return True + + +def contains_tensor_types(type): + tensor_type = torch._C.TensorType.get() + return type.isSubtypeOf(tensor_type) or any( + contains_tensor_types(e) for e in type.containedTypes() + ) + + +_like_tensor_constructors = ( + aten.empty_like.default, + aten.empty_like.out, + aten.full_like.default, + aten.full_like.out, + aten.ones_like.default, + aten.ones_like.out, + aten.rand_like.default, + aten.rand_like.out, + aten.randn_like.default, + aten.randn_like.out, + aten.randint_like.default, + aten.randint_like.out, + aten.randint_like.low_dtype, + aten.randint_like.low_dtype_out, + aten.zeros_like.default, + aten.zeros_like.out, + aten.new_empty.default, + aten.new_empty.out, + aten.new_empty_strided.default, + aten.new_empty_strided.out, + aten.new_full.default, + aten.new_full.out, + aten.new_zeros.default, + aten.new_zeros.out, + aten.new_ones.default, + aten.new_ones.out, +) + + +@contextlib.contextmanager +def unset_fake_temporarily(): + old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) + try: + yield old + finally: + if old is not None: + torch._C._set_dispatch_mode(old) + + +@functools.lru_cache(None) +def _is_tensor_constructor(func: OpOverload): + assert isinstance(func, OpOverload) + schema = func._schema + if any(contains_tensor_types(arg.type) for arg in schema.arguments): + return False + # TODO: no real reason to restrict multiple outputs + return ( + len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get() + ) + + +def is_fake(x): + if isinstance(x, FakeTensor): + return True + if is_traceable_wrapper_subclass(x): + attrs, _ = type(x).__tensor_flatten__(x) + flattened_tensors = [getattr(x, attr) for attr in attrs] + # need to recurse because we could have nested subclasses + all_fake = all(is_fake(x) for x in flattened_tensors) + any_fake = any(is_fake(x) for x in flattened_tensors) + assert all_fake == any_fake, "got mixed fake and real tensors!" 
+ return all_fake + elif isinstance(x, torch.Tensor) and torch._is_functional_tensor(x): + reapply_views = torch._C._functionalization_reapply_views_tls() + unwrapped = torch._C._functorch._unwrap_functional_tensor(x, reapply_views) + return is_fake(unwrapped) + return False + + +def maybe_get_fake_mode(t): + if isinstance(t, FakeTensor): + return t.fake_mode + if is_traceable_wrapper_subclass(t): + inner_tensor_names, _ = t.__tensor_flatten__() + modes = [ + maybe_get_fake_mode(getattr(t, t_name)) for t_name in inner_tensor_names + ] + m = modes[0] + assert all(m is x for x in modes) + return m + elif isinstance(t, torch.Tensor) and torch._is_functional_tensor(t): + reapply_views = torch._C._functionalization_reapply_views_tls() + unwrapped = torch._C._functorch._unwrap_functional_tensor(t, reapply_views) + return maybe_get_fake_mode(unwrapped) + return None + + +@functools.lru_cache(None) +def get_schema_info(func): + return torch._C._SchemaInfo(func._schema) # type: ignore[attr-defined] + + +# many of the decompositions registered to torch/_prims do not at the moment model +# aliasing or strides, so as an incremental step, just enable the decompositions in +# torch/_decomp/decompositions.py. +# decomps are used for aot autograd tracing so we would like to unify on their +# implementation and add additional testing to them +@functools.lru_cache(None) +def torch_decomp_decompositions(func): + from torch._decomp import decomposition_table + + decompositions = torch._decomp.decompositions + decomp_attrs = [getattr(decompositions, attr) for attr in dir(decompositions)] + return decomposition_table[func] in decomp_attrs + + +def tree_flatten_only(ty: Type[T], tree: PyTree): + flat_vals = pytree.tree_leaves(tree) + return [elem for elem in flat_vals if isinstance(elem, ty)] + + +# Similar to `MetaConverter`, this is a class for converting +# multiple tensors into fake tensors which share the same view/storage +# structure. Like `MetaConverter`, it uses `WeakIdRef` to +# hold a weak reference for all memoized tensors. +class FakeTensorConverter: + @property + def tensor_memo(self): + return self.meta_converter.tensor_memo + + meta_converter: MetaConverter + constant_storage_mapping: Dict[StorageWeakRef, List[ReferenceType]] + + def __init__(self): + self.meta_converter = MetaConverter() + + # map from to storage to corresponding constant tensors + self.constant_storage_mapping = {} + + def add_constant_storage_mapping(self, fake_tensor): + # when you have a constant, aliased tensor: + # const_tensor.add_(torch.rand([1])) + # all aliases of it must become no longer const + assert isinstance(fake_tensor, FakeTensor) and fake_tensor.constant is not None + weak_st = StorageWeakRef(fake_tensor.constant._typed_storage()) + + # we need a map from a weak storage to all of its corresponding + # constant tensors. 
python doesn't have the weak value equivalent + # of defaultdict(list), so we are using a WeakValueDictionary as one + if weak_st not in self.constant_storage_mapping: + self.constant_storage_mapping[weak_st] = [] + self.constant_storage_mapping[weak_st].append(weakref.ref(fake_tensor)) + + def invalidate_constant_aliases(self, tensor): + assert not isinstance(tensor, FakeTensor) + + weak_st = StorageWeakRef(tensor._typed_storage()) + if weak_st not in self.constant_storage_mapping: + return + + for weak_tensor_ref in self.constant_storage_mapping[weak_st]: + ten = weak_tensor_ref() + if ten is not None: + ten._fix_weakref() + ten.constant = None + + del self.constant_storage_mapping[weak_st] + + def _get_memo(self, t): + if WeakIdRef(t) in self.tensor_memo: + out = self.tensor_memo[WeakIdRef(t)] + out._fix_weakref() + return out + return None + + def set_tensor_memo(self, t, v): + th = WeakIdRef(t) + + # hold a weak ref to self, otherwise it will be kept alive + # by the del_ten closure + self_weak_ref = weakref.ref(self) + + def del_ten(): + self_ref = self_weak_ref() + if self_ref is None: + return + # on shutdown, th may not be in memo + self_ref.tensor_memo.pop(th, None) + + weakref.finalize(t, del_ten) + self.tensor_memo[th] = v + + def from_real_tensor( + self, + fake_mode, + t, + make_constant=False, + shape_env=None, + *, + source=None, + symbolic_context=None, + memoized_only=False, + ): + # see note [Tensor Fakification and Symbol Caching] + if not symbolic_context and not source and shape_env: + if tracing_context := torch._guards.TracingContext.try_get(): + if t in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[t] + source = symbolic_context.tensor_source + + maybe_memo = self._get_memo(t) + if maybe_memo is not None: + return maybe_memo + if memoized_only: + return None + existing_device = t.device + # not yet supported in metatensors + if t.is_quantized: + raise UnsupportedFakeTensorException("quantized nyi in meta tensors") + if type(t) is torch.nn.Parameter: + assert not make_constant + + def mk_fake_tensor(make_meta_t): + # NB: don't use in_kernel_invocation_manager. to + # ensure FakeTensor can internally do constant computation + # as necessary. Invocation manager is "more correct" as + # it works for more operators in make_meta_t, but + # invariant is that make_meta_t only calls factories + # for which it is not strictly necessary to use the + # invocation manager (I think!) + with no_dispatch(): + return FakeTensor( + fake_mode, + make_meta_t(), + existing_device, + constant=t if make_constant else None, + ) + + out = self.meta_converter( + t, + shape_env=shape_env, + callback=mk_fake_tensor, + source=source, + symbolic_context=symbolic_context, + ) + if out is NotImplemented: + raise UnsupportedFakeTensorException("meta converter nyi") + if make_constant: + self.add_constant_storage_mapping(out) + # NB: meta_converter set the memo + return out + + # If you specify the device, it MUST be a meta tensor. + def from_meta_and_device(self, fake_mode, t, device): + assert ( + t.device.type == "meta" + ), f"tensor's device must be `meta`, got {t.device.type} instead" + maybe_memo = self._get_memo(t) + if maybe_memo is not None: + return maybe_memo + out = FakeTensor(fake_mode, t, device) + self.set_tensor_memo(t, out) + return out + + # You can have a real tensor that you need to convert into a fake tensor. + # If you have a meta tensor already, call from_meta_and_device. 
+ # + # You're allowed to pass a meta tensor to be turned into a fake + # tensor; although an odd thing to do, this can occur if you're doing + # cross ref testing and the inner test is already operating on meta tensors. + def __call__( + self, + fake_mode, + t, + *, + make_constant=False, + shape_env=None, + source=None, + symbolic_context=None, + memoized_only=False, + ): + return self.from_real_tensor( + fake_mode, + t, + make_constant, + shape_env=shape_env, + source=source, + symbolic_context=symbolic_context, + memoized_only=memoized_only, + ) + + +op_implementations = [] + + +def register_op_impl(run_impl_check: Union[Callable[[OpOverload], bool], OpOverload]): + def impl_decorator(op_impl): + global op_implementations + if isinstance(run_impl_check, OpOverload): + op_implementations.append((lambda func: func == run_impl_check, op_impl)) + else: + op_implementations.append((run_impl_check, op_impl)) + + return op_impl + + return impl_decorator + + +@register_op_impl( + lambda func: (_is_tensor_constructor(func) or func in _like_tensor_constructors) +) +def constructors(fake_mode, func, *args, **kwargs): + assert func not in _non_kwarg_device_constructors + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + if func in _like_tensor_constructors: + default_device = new_kwargs["input"].device + # TODO: file issue + args = (new_kwargs.pop("input"),) + else: + # cpu is default device if none is specified + default_device = torch.device("cpu") + args = () + out_device = new_kwargs.pop("device", None) + out_device = out_device if out_device is not None else default_device + new_kwargs["device"] = torch.device("meta") + # _like constructors have fake tensor inputs (maybe this causes the non-like + # to fail? 
hmmm) + with in_kernel_invocation_manager(fake_mode): + r = func(*args, **new_kwargs) + return FakeTensor(fake_mode, r, out_device) + + +@register_op_impl(lambda func: func in (aten.to.prim_Device, aten.to.device)) +def non_kwarg_to(fake_mode, func, *args, **kwargs): + _, new_kwargs = normalize_function( + func, args, kwargs, normalize_to_only_use_kwargs=True + ) + input_device = new_kwargs["device"] + out_device = input_device if input_device else new_kwargs["input"].device + new_kwargs["device"] = torch.device("meta") + inp = new_kwargs.pop("input") + with in_kernel_invocation_manager(fake_mode): + r = func(inp, **new_kwargs) + # TODO: I think this does the wrong thing if r is inp + return fake_mode.fake_tensor_converter.from_meta_and_device( + fake_mode, r, out_device + ) + + +def stride_incorrect_op(op): + if op.namespace not in ("aten", "prims"): + return False + if op is aten._fft_c2c.default: + return False + + op_name = op.name() + if "fft" in op_name: + return True + return False + + +# These operators have meta implementations with incorrect strides +@register_op_impl(stride_incorrect_op) +def wordaround_stride_incorrect_op(fake_mode, func, *args, **kwargs): + # This is a workaround for meta implmentations with incorrect strides + + def is_symbolic(x): + if isinstance(x, FakeTensor): + return x._has_symbolic_sizes_strides + if isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool)): + return True + return False + + # For static shapes, we can fall back to eager for the real strides + if fake_mode.allow_fallback_kernels: + require_dynamic = any( + is_symbolic(x) for x in itertools.chain(args, kwargs.values()) + ) + if not require_dynamic: + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + return run_fallback_kernel(fake_mode, func, flat_args, args_spec, None) + + raise UnsupportedOperatorException(func) + + +# Dont default to default device handling, +# since the device of `the_template` is ignored +@register_op_impl(aten.resize_as_.default) +def resize_as_(fake_mode, func, *args, **kwargs): + with in_kernel_invocation_manager(fake_mode): + return func(*args, **kwargs) + + +@register_op_impl(aten._sparse_coo_tensor_with_dims_and_tensors.default) +def _sparse_coo_tensor_with_dims_and_tensors(fake_mode, func, *args, **kwargs): + # TODO: remove me + return constructors(fake_mode, func, *args, **kwargs) + + +# index.Tensor data-dependent in only some conditions +@register_op_impl( + lambda func: torch.Tag.dynamic_output_shape in func.tags + and func + not in [aten.index.Tensor, aten.nonzero.default, aten.repeat_interleave.Tensor] +) +def dyn_shape(fake_mode, func, *args, **kwargs): + raise DynamicOutputShapeException(func) + + +@register_op_impl(lambda func: func is aten.repeat_interleave.Tensor) +def repeat_interleave_tensor(fake_mode, func, repeats, output_size=None): + if output_size is None: + if ( + fake_mode.shape_env is None + or not fake_mode.shape_env.allow_dynamic_output_shape_ops + ): + raise DynamicOutputShapeException(func) + + output_size = fake_mode.shape_env.create_unbacked_symint() + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import _constrain_range_for_size + + _constrain_range_for_size(output_size) + # TODO: consider a memo + return repeats.new_empty(output_size) + + +@register_op_impl(lambda func: func is torch.ops.aten._local_scalar_dense.default) +def local_scalar_dense(fake_mode, func, arg): + if fake_mode.shape_env is None or not fake_mode.shape_env.allow_scalar_outputs: + # Without 
symints/symfloats, cannot handle this + raise DataDependentOutputException(func) + if is_float_dtype(arg.dtype): + return fake_mode.shape_env.create_unbacked_symfloat() + elif is_integer_dtype(arg.dtype): + return fake_mode.shape_env.create_unbacked_symint() + elif is_boolean_dtype(arg.dtype): + return fake_mode.shape_env.create_unbacked_symbool() + else: + raise NotImplementedError(f"local_scalar_dense/item NYI for {arg.dtype}") + + +@register_op_impl(lambda func: func is torch.ops.aten.nonzero.default) +def nonzero(fake_mode, func, arg): + if ( + fake_mode.shape_env is None + or not fake_mode.shape_env.allow_dynamic_output_shape_ops + ): + # Without symints/symfloats, cannot handle this + raise DynamicOutputShapeException(func) + + if arg.nonzero_memo is None: + nnz = fake_mode.shape_env.create_unbacked_symint() + + # This is unsound, but it works well in practice + # See https://docs.google.com/document/d/1lFRYAJo5nrfxRhwIzGnfi2pbLpU6T4ytSRSuLJ5qebI/edit# + # TODO: Add a config knob to turn off this unsound behavior + # + # NB: If numel < 2, the bounds here might be COMPLETELY + # disjoint with what can actually occur. But this is fine: + # remember, the hypothesis is that if your later code works + # with N >= 2, it will work with N = 1 and N = 0. + maxval = sys.maxsize - 1 + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import ( + _constrain_range_for_size, + has_free_symbols, + ) + + if not has_free_symbols(arg.numel()): + # Don't upgrade the range if numel is less than two, since we then + # have an empty range which makes things go explodey. We also + # don't allow for 2 because that would specialize the unbacked + # SymInt to 2, which is also likely to be buggy. + if arg.numel() > 2: + maxval = int(arg.numel()) + + _constrain_range_for_size(nnz, max=maxval) + + arg._nonzero_memo = nnz + arg._nonzero_memo_vc = arg._version + + return arg.new_empty((arg.nonzero_memo, arg.dim()), dtype=torch.int64) + + +@register_op_impl(lambda func: func is torch.ops.aten.masked_select.default) +def masked_select(fake_mode, func, self, mask): + if ( + fake_mode.shape_env is None + or not fake_mode.shape_env.allow_dynamic_output_shape_ops + ): + # Without symints/symfloats, cannot handle this + raise DynamicOutputShapeException(func) + + nnz = fake_mode.shape_env.create_unbacked_symint() + + # see nonzero for commentary + maxval = sys.maxsize - 1 + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import ( + _constrain_range_for_size, + has_free_symbols, + ) + + if not has_free_symbols(arg.numel()): + if arg.numel() >= 2: + maxval = int(arg.numel()) + + _constrain_range_for_size(nnz, max=maxval) + + return self.new_empty((nnz,)) + + +# NB: this must be ordered after local_scalar_dense +@register_op_impl(lambda func: torch.Tag.data_dependent_output in func.tags) +def data_dep(fake_mode, func, *args, **kwargs): + raise DataDependentOutputException(func) + + +# Bool Indices get Expanded as Masks +# See: IndexingUtils.h:expandTensors +def check_no_bool_index_tensors(func, self, indices): + for index in indices: + if index is not None and index.dtype in (torch.bool, torch.uint8): + raise DynamicOutputShapeException(func) + + +def run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs): + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + out_device = new_kwargs["input"].device + with in_kernel_invocation_manager(fake_mode): + out = func(*args, 
**kwargs) + if not is_noncontiguous_supported(out_device): + out = out.new_empty(out.shape) + + if out is new_kwargs["input"]: + return out # copy_ + return FakeTensor(fake_mode, out, out_device) + + +# Dont default to default device handling, +# Since op can take in non-zero sized cpu +# index tensors with cuda self +@register_op_impl(aten.index.Tensor) +def index_tensor(fake_mode, func, *args, **kwargs): + from torch._meta_registrations import meta_index_Tensor + + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + out_device = new_kwargs["input"].device + # ensure nonzero call goes to fake tensor + with fake_mode: + out = meta_index_Tensor(*args, **kwargs) + return out.to(out_device) + + +# Can take mixed meta/non-meta arguments; the meta registration +# will roughly do the right thing even when given real devices +@register_op_impl(aten._embedding_bag.default) +def embedding_bag(fake_mode, func, *args, **kwargs): + from torch._meta_registrations import meta_embedding_bag + + with fake_mode: + return meta_embedding_bag(*args, **kwargs) + + +# takes in multiple-devices, dont default to default device handling +@register_op_impl(aten._unsafe_index_put.default) +@register_op_impl(aten.copy.default) +@register_op_impl(aten.copy_.default) +@register_op_impl(aten.slice_scatter.default) +def multi_device_op_default(fake_mode, func, *args, **kwargs): + return run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs) + + +# same with multi_device_op_default, but return the input +@register_op_impl(aten.copy.out) +@register_op_impl(aten.slice_scatter.out) +def multi_device_op_out(fake_mode, func, *args, **kwargs): + with in_kernel_invocation_manager(fake_mode): + out = func(*args, **kwargs) + + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + return new_kwargs["input"] + + +@register_op_impl(aten.index_put.default) +@register_op_impl(aten.index_put_.default) +def index_put_impl(fake_mode, func, *args, **kwargs): + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + + values = new_kwargs["values"] + self_device = new_kwargs["input"].fake_device + torch._check( + self_device == values.fake_device or (values.ndim == 0 and values.numel() == 1), + lambda: f"Mismatching {func} device between self ({self_device}) and values ({values.device})", + ) + + out = run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs) + if func is aten.index_put_.default: + return new_kwargs["input"] + else: + return out + + +@register_op_impl(lambda fn: fn in _device_not_kwarg_ops) +def nyi(fake_mode, func, *args, **kwargs): + assert func not in _device_not_kwarg_ops, f"NYI: {func}" + + +@register_op_impl( + lambda func: func in (aten.convolution.default, aten.convolution_backward.default) +) +def conv(fake_mode, func, *args, **kwargs): + _, kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + device = kwargs["input"].fake_device + # need to re-enable mode so the tensors report fake device + with fake_mode: + # if the input is unsqueezed is done in Convolution.cpp we get segfault + k = kwargs["weight"].ndim + batch = kwargs["input"].shape[0] + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import has_hint + + if not has_hint(batch): + # TODO: We can make this a little more faithful with best effort + # channels last 
detection (but only if it's statically obvious!) + mem_fmt = None + elif k == 3 and not kwargs["input"].is_mkldnn and not kwargs["input"].is_xpu: + mem_fmt = None + else: + if func is aten.convolution.default: + conv_backend = torch._C._select_conv_backend(**kwargs) + else: + conv_backend = torch._C._select_conv_backend( + kwargs["input"], + kwargs["weight"], + bias=None, + stride=kwargs["stride"], + padding=kwargs["padding"], + dilation=kwargs["dilation"], + transposed=kwargs["transposed"], + output_padding=kwargs["output_padding"], + groups=kwargs["groups"], + bias_sizes=kwargs["bias_sizes"], + ) + mem_fmt = torch._C._conv_determine_backend_memory_format( + kwargs["input"], kwargs["weight"], conv_backend + ) + + def convert(t, mem_fmt): + if t is None: + return t + if mem_fmt is not None: + t = t.to(memory_format=mem_fmt) + return FakeTensor(fake_mode, t, device) + + with in_kernel_invocation_manager(fake_mode): + out = func(**kwargs) + + if func is aten.convolution.default: + return convert(out, mem_fmt) + else: + return ( + convert(out[0], mem_fmt), + convert(out[1], mem_fmt), + convert(out[2], None), + ) + + +FAST_OP_IMPLEMENTATIONS = {} + + +# Unlike register_op_impl, these don't do the slow iteration for +# run_impl_check, and these run BEFORE decompositions +def register_fast_op_impl(func: OpOverload): + def impl_decorator(op_impl): + FAST_OP_IMPLEMENTATIONS[func] = op_impl + return op_impl + + return impl_decorator + + +# infer_size_impl in ExpandUtils +def infer_size(a, b): + dimsA = len(a) + dimsB = len(b) + ndim = max(dimsA, dimsB) + expandedSizes = [0] * ndim + for i in range(ndim - 1, -1, -1): + offset = ndim - 1 - i + dimA = dimsA - 1 - offset + dimB = dimsB - 1 - offset + sizeA = a[dimA] if dimA >= 0 else 1 + sizeB = b[dimB] if dimB >= 0 else 1 + + # NB: It is very important to test for broadcasting, before testing + # sizeA == sizeB. This is because the broadcasting tests are likely + # to be statically known (in particular, if sizeA/sizeB is unbacked + # but size-like, we will unsoundly assume they never equal 1), but + # the sizeA == sizeB test may not be statically known. However, once + # we have established that no broadcasting is happening, the + # sizeA == sizeB is now expect_true and we can defer it as a runtime + # assert (this works because Python will return the terminal + # expression of an or statement as-is, without bool()'ing it; if this + # were not the case, we'd need to write this using torch.sym_or() or + # something like that). + torch._check( + sizeA == 1 or sizeB == 1 or sizeA == sizeB, + lambda: f"The size of tensor a ({sizeA}) " + f"must match the size of tensor b ({sizeB}) " + f"at non-singleton dimension {i})", + ) + expandedSizes[i] = sizeB if sizeA == 1 else sizeA + return tuple(expandedSizes) + + +def make_fast_binary_impl(slow_ref): + def fast_binary_impl(mode, *args, **kwargs): + def slow(msg): + count_label(f"slow {msg}") + with mode: + return slow_ref(*args, **kwargs) + + count_label("attempt fast") + + # Fast path (based off of TensorIterator fast path). + # Unfortunately, there is no way to easily deduplicate + # this with either the TensorIterator C++ implementation + # (which we don't want to SymIntify, and also the algorithm + # here is slightly different from TensorIterator to allow + # for broadcasting), nor the PrimTorch implementation + # (which does not actually implement a fast path.) 
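+        # Illustrative sketch (added commentary, not executed): with the
+        # infer_size helper above, dimensions are paired from the right, so
+        #
+        #     infer_size((3, 1, 5), (4, 5))  ->  (3, 4, 5)
+        #
+        # and any pair where neither size is 1 nor the sizes match trips the
+        # torch._check in infer_size. The fast path below only fires when one
+        # operand already has the final broadcast shape; otherwise we fall
+        # back to the slow reference implementation.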
+ + operands = args + + # compute_shape + has_scalars = False + has_tensors = False + final_shape = None + for op in operands: + shape = op.shape if isinstance(op, torch.Tensor) else () + if len(shape) == 0: + has_scalars = True + else: + has_tensors = True + if final_shape is None: + final_shape = shape + # TODO: Minor optimization: track if the shapes + # were equal so you can skip the equality check + # below if unnecessary + final_shape = infer_size(final_shape, shape) + assert final_shape is not None + + # Do some extra safety checks to see if the output + # stride is obvious + for op in operands: + if isinstance(op, torch.Tensor) and op.shape == final_shape: + break + else: + return slow("both tensors nontrivially broadcast") + + # compute_types + cpu = torch.device("cpu") + common_device = cpu + common_dtype = None + output_dtype = None + has_different_input_dtypes = False + for op in operands: + if not isinstance(op, torch.Tensor): + # Use elementwise_dtypes for the tricky case + has_different_input_dtypes = True + continue + if common_device == cpu and not op.device.type == "cpu": + common_device = op.device + # Slightly simplified here as target_dtype cannot vary + if common_dtype is None: + common_dtype = op.dtype + elif common_dtype != op.dtype: + has_different_input_dtypes = True + + if has_different_input_dtypes: + # compute promotion + # TODO: we don't need the compute type + _, common_dtype = elementwise_dtypes( + *operands, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + + # check all tensors on same device + # cpu scalars are assumed allow + current_cpu_scalars_on_non_cpu = 0 + max_cpu_scalars_on_non_cpu = 1 # hard coded atm + for op in operands: + if not isinstance(op, torch.Tensor): + continue + if common_device != cpu and op.dim() == 0 and op.device == cpu: + if current_cpu_scalars_on_non_cpu >= max_cpu_scalars_on_non_cpu: + return slow("error") + current_cpu_scalars_on_non_cpu += 1 + elif op.device != common_device: + return slow("error") + + # compute_fast_setup_type + is_contiguous = True + is_channels_last = True + # TODO: is_non-overlapping_and_dense (not bound from Python + # no inplace, no out, everything defined + + if is_noncontiguous_supported(common_device): + for op in operands: + if not isinstance(op, torch.Tensor): + continue + is_contiguous = is_contiguous and op.is_contiguous( + memory_format=torch.contiguous_format + ) + is_channels_last = is_channels_last and op.is_contiguous( + memory_format=torch.channels_last + ) + if is_contiguous: + # do contiguous + count_label("fast is_contiguous") + return FakeTensor( + mode, + torch.empty( + final_shape, + dtype=common_dtype, + device="meta", + memory_format=torch.contiguous_format, + ), + device=common_device, + ) + if is_channels_last: + count_label("fast channels_last") + # do channels last + return FakeTensor( + mode, + torch.empty( + final_shape, + dtype=common_dtype, + device="meta", + memory_format=torch.channels_last, + ), + device=common_device, + ) + + return slow("no contiguity match") + + return fast_binary_impl + + +@functools.lru_cache(None) +def get_fast_op_impls(): + import torch._refs + + register_fast_op_impl(torch.ops.aten.add.Tensor)( + make_fast_binary_impl(torch._refs.add) + ) + register_fast_op_impl(torch.ops.aten.sub.Tensor)( + make_fast_binary_impl(torch._refs.sub) + ) + register_fast_op_impl(torch.ops.aten.mul.Tensor)(make_fast_binary_impl(torch._refs.mul)) # type: ignore[has-type] + register_fast_op_impl(torch.ops.aten.div.Tensor)( + 
make_fast_binary_impl(torch._refs.div) + ) + return FAST_OP_IMPLEMENTATIONS + + +@functools.lru_cache(None) +def init_cuda_context(): + # Backward will error with cuda Fake Tensors if no cuda tensors have been initialized first + if torch.cuda.is_available(): + torch.empty(1, device="cuda") if torch.version.hip is None else torch.zeros( + 1, device="cuda" + ) + + +@contextlib.contextmanager +def in_kernel_invocation_manager(fake_mode): + # See: note [Fake Tensor Dispatch Keys] + prev_in_kernel = fake_mode.in_kernel_invocation + meta_in_tls = torch._C._meta_in_tls_dispatch_include() + assert meta_in_tls == prev_in_kernel, f"{meta_in_tls}, {prev_in_kernel}" + + guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined] + fake_mode.in_kernel_invocation = True + torch._C._set_meta_in_tls_dispatch_include(True) + try: + yield + finally: + fake_mode.in_kernel_invocation = prev_in_kernel + torch._C._set_meta_in_tls_dispatch_include(prev_in_kernel) + del guard + + +# Return if the function allows Python numbers to bind to Tensors +def should_allow_numbers_as_tensors(func: OpOverload): + return torch._C._should_allow_numbers_as_tensors( + func.name().split("::")[-1].split(".")[0] + ) + + +class FakeTensorConfig: + debug = os.environ.get("TORCH_FAKE_TENSOR_DEBUG", False) + + +class FakeTensor(torch.Tensor): + """ + Meta tensors give you the ability to run PyTorch code without having to + actually do computation through tensors allocated on a `meta` device. + Because the device is `meta`, meta tensors do not model device propagation. + FakeTensor extends MetaTensors to also carry an additional `fake_device` + which tracks devices that would have been used. + """ + + fake_device: torch.device + fake_mode: "FakeTensorMode" + constant: Optional[torch.Tensor] + + # This memorizes the unbacked SymInt representing the number of nonzero + # elements in this tensor. This is helpful if you do something like + # x[mask] and y[mask]; mask.nonzero() gets repeatedly called and should + # give a consistent unbacked SymInt. It needs to be invalidated in the + # same way constant is. + # TODO: Generalize this as needed, e.g., into a trie of memos + _nonzero_memo: Optional[torch.SymInt] + _nonzero_memo_vc: Optional[int] + + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + _mode_key = torch._C._TorchDispatchModeKey.FAKE + + @property + def nonzero_memo(self): + if self._nonzero_memo is None: + return None + # Version counter based tracking isn't 100% sound but it's close + # enough + if self._nonzero_memo_vc != self._version: + self._nonzero_memo = None + return None + return self._nonzero_memo + + @property + def device(self): + if self.fake_mode.in_kernel_invocation: + return torch.device("meta") + else: + return self.fake_device + + # Note: [Fake Tensor Dispatch Keys] + # In order to model the behavior of device-specific autocast + # and autograd logic, we update the dispatch keys of FakeTensors + # to reflect their fake device. This includes the BackendComponent + # (DispatchKey::Meta -> DispatchKey::CUDA), and also the BackendComponent + # related Autocast and Autograd keys. __torch__dispatch__ sits below + # Autocast and Autograd, and is only invoked when we are at the + # kernel for the BackendComponent. Then, we add Meta to the + # thread-local dispatch include set to hit the meta kernel + # instead of the kernel of the BackendComponent for the fake device. 
+ # The `device_for_backend_keys` does that below + # NOTE: this probably will not do the right thing for backends + # that have dispatch keys which are higher than the "meta" key: + # https://github.com/pytorch/pytorch/blob/main/c10/core/DispatchKey.h#L189 + + @staticmethod + def __new__(cls, fake_mode, elem, device, constant=None): + self = torch.Tensor._make_subclass( + cls, + elem, + elem.requires_grad, + dispatch_device=True, + device_for_backend_keys=device, + ) + + assert elem.device.type == "meta", elem.device.type + device = device if isinstance(device, torch.device) else torch.device(device) + # NB: it is fine, if a little confusing, for device to be meta + # (we are faking a meta tensor in that case). However, it often + # indicates some sort of confusion (e.g., you accidentally passed + # in a meta tensor when you should have passed in the real tensor). + # So by default we disallow meta, and if you are working in a situation + # where it is helpful (e.g., crossref testing) you can turn it back + # on + if not fake_mode.allow_meta: + assert device.type != "meta" + # normalize device. + if device.type == "cuda": + init_cuda_context() + + if ( + device.type + in ["cuda", "hpu", "xpu", torch._C._get_privateuse1_backend_name()] + and device.index is None + ): + device = torch.device( + f"{device.type}:{getattr(torch, device.type).current_device()}" + ) + self.fake_device = device # type: ignore[attr-defined] + self.fake_mode = fake_mode # type: ignore[attr-defined] + self.constant = constant # type: ignore[attr-defined] + self._nonzero_memo = None # type: ignore[attr-defined] + self._nonzero_memo_vc = None # type: ignore[attr-defined] + + if FakeTensorConfig.debug: + import traceback + + self._debug_trace = traceback.extract_stack() # type: ignore[attr-defined] + return self + + # In some circumstances, a conventional torch.Tensor constructor + # will get rewritten to call into FakeTensor. We must provide an + # __init__ method that can accept the Python interpreters initialization + # in such a situation; we must also be able to handle direct fake + # tensor construction via FakeTensor(). + # + # In particular, the __init__ call will look funny in the following case: + # + # with FakeTensorMode(): + # x = torch.Tensor([1, 2, 3]) + # + # this desugars into: + # + # with FakeTensorMode(): + # x = torch.Tensor.__new__([1, 2, 3]) + # # NB: x is a fake tensor, because of the mode! + # x.__init__([1, 2, 3]) # not the normal fake tensor args! + # + def __init__(self, *args, **kwargs): + super().__init__() + + @staticmethod + def from_tensor(t, fake_mode): + return fake_mode.from_tensor(t) + + @classmethod + @count + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + # need to handle here to avoid infinite recursion + # see [in_kernel_invocation] + if func == torch.ops.prim.device.default: + assert len(args) == 1 and isinstance(args[0], FakeTensor) + if args[0].fake_mode.in_kernel_invocation: + return torch.device("meta") + else: + return args[0].fake_device + + # Because fake mode can return NotImplemented (if it sees a subclass + # it doesn't know how to deal with), this test here is important + # because the next dispatch after a fake mode will attempt to use + # subclasses of tensors to dispatch, and any FakeTensor arguments + # will be considered eligible. 
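+        # Hedged example of the situation described above (``MyWrapperTensor``
+        # is a hypothetical user-defined subclass, not part of this file):
+        #
+        #     class MyWrapperTensor(torch.Tensor): ...
+        #
+        # If MyWrapperTensor appears in ``types``, we return NotImplemented
+        # below so that its own __torch_dispatch__ gets a chance to unwrap its
+        # contents before fake tensor handling runs.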
+ unrecognized_types = [ + t for t in types if not issubclass(t, FakeTensor) and t is not torch.Tensor + ] + if unrecognized_types: + not_implemented_log.debug( + "FakeTensor unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + fake_mode = None + for arg in pytree.arg_tree_leaves(*args, **kwargs): + if isinstance(arg, FakeTensor): + fake_mode = arg.fake_mode + break + + assert fake_mode is not None + + # If the fake mode is already active, don't try to reapply it! + # NotImplemented is the right thing to return here, because the + # typical situation this can occur is if ProxyTensorMode returned a + # NotImplemented because of a not implemented subclass; we may have + # unluckily attempted to hit FakeTensor's dispatch first, + # NotImplemented lets us keep chaining until we find the actual + # subclass + maybe_cur_fake_mode = torch._C._get_dispatch_mode( + torch._C._TorchDispatchModeKey.FAKE + ) + if maybe_cur_fake_mode: + not_implemented_log.debug( + "FakeTensor mode already active: %s in %s", + fake_mode, + maybe_cur_fake_mode, + ) + return NotImplemented + + with fake_mode: # type: ignore[attr-defined] + return func(*args, **kwargs) + + @staticmethod + def _find_common_device(func, flat_args) -> Tuple[torch.device, bool]: + # Returns: (common_device, has_scalar_only_inputs) + + # cpu - zero-dim tensors can be called in cuda kernels, + # so overwrite the common_device if it the only existing + # device comes from a cpu zero-dim tensor + common_device = None + has_scalar_only_inputs = False + is_cpu_zero_dim = None + + def cpu_zero_dim(t): + return t.device.type == "cpu" and t.dim() == 0 + + def merge_devices(t): + nonlocal common_device + nonlocal is_cpu_zero_dim + if not isinstance(t, FakeTensor): + return + + if common_device is None: + common_device = t.device + is_cpu_zero_dim = cpu_zero_dim(t) + return + + t_is_cpu_zero_dim = cpu_zero_dim(t) + if t.device == common_device: + if is_cpu_zero_dim: + is_cpu_zero_dim = t_is_cpu_zero_dim + return + + # mismatching devices ! + # if current tensor is cpu 0 dim, defer to existing device + if t_is_cpu_zero_dim: + return + + # current device is from cpu 0 dim tensor, overwrite + if is_cpu_zero_dim: + common_device = t.device + is_cpu_zero_dim = t_is_cpu_zero_dim + return + + # mismatching devices of non-zero dim tensors, throw + # This might be valid behavior and need to be explicitly modeled, e.g. reshape_as + raise RuntimeError( + f"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}" + ) + + for arg in flat_args: + merge_devices(arg) + + # some functions that allow Python numbers to bind to Tensors + # if we have failed to find a device, and we're running one of these operators, + # we must have scalar only inputs + if should_allow_numbers_as_tensors(func) and common_device is None: + # ops with scalar only inputs always have result on cpu + has_scalar_only_inputs = True + common_device = torch.device("cpu") + + assert common_device is not None, f"Could not find common device for {func}" + + return common_device, has_scalar_only_inputs + + # We must handle tolist in a special way for FakeTensors here in the case + # where tolist is called from torch dispatch for tensor subclasses. + # Ordinarily, if a program calls .tolist compiling still works because there is + # special handling in dynamo, but for tensor subclasses if .tolist is called + # inside torch dispatch, the .tolist call may be directly on a FakeTensor. 
+ # This would result in an error since wrapper subclasses don't have storage. + # To avoid this, we handle the FakeTensor case by (1) specializing on the size + # of the tensor to create the output Python list, and (2) creating unbacked + # symints for each element of the list. + def tolist(self): + assert self.dim() == 1, "NYI for higher dims" + shape_env = self.fake_mode.shape_env + out = [] + # Specialize on the length of the list + for _ in range(self.shape[0]): + s = shape_env.create_unbacked_symint() + # max value? + torch._constrain_as_size(s, min=2) + out.append(s) + return out + + __torch_function__ = torch._C._disabled_torch_function_impl + + +# We keep one instantiation of `fake_tensor_converter` active +# for the duration of `with FakeTensorMode()`. +# This allows accurate storage aliasing across invocation of +# different operators. While this will keep all freshly allocated +# tensors alive during `FakeTensorMode`, there will no be no +# new allocations of Tensors which have non-meta storage so +# memory should not significantly increase. + + +class FakeTensorMode(TorchDispatchMode): + def __init__( + self, + *, + allow_fallback_kernels=True, + allow_non_fake_inputs=False, + shape_env=None, + static_shapes=None, + ): + log.debug("create_mode 0x%x", id(self)) + self.allow_fallback_kernels = allow_fallback_kernels + self.fake_tensor_converter = FakeTensorConverter() + if static_shapes is not None: + self.static_shapes = static_shapes + else: + self.static_shapes = shape_env is None + + import torch._functorch.config + + self.allow_meta = torch._functorch.config.fake_tensor_allow_meta + + # A flag that controls, whether we want to invoke ops on mix of + # real weights/global variables and fake inputs + self.allow_non_fake_inputs = allow_non_fake_inputs + + # [in_kernel_invocation] + # when FakeTensor is invoked in user code, .device should return + # the fake_device of the tensor so that code such as as `if x.is_cuda` + # or torch.zeros([10, 10], device=x.device) continues to execute as if + # the FakeTensor were real. However, within kernel execution, we return + # the `Meta` device because all computation within the kernels should + # behave as if the Tensors are on meta devices. Kernels should allocate + # new tensors on meta devices, and checks like `is_meta` should return true. + # within python refs, we always return the real device by defining + # the device property + self.in_kernel_invocation = False + + # True if we enter'ed and actually enabled fake tensor mode, + # false if it was a no-op. Not thread safe but neither is + # in_kernel_invocation + # If another fake mode was already active when we enter, we also stash it here. + # That way when we exit, we know to re-enable the previous fake mode. + self.enter_stack: List[Tuple[bool, Optional[FakeTensorMode]]] = [] + + self.shape_env = shape_env + + self.stack = "".join(traceback.format_stack()) + + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + self._mode_key = torch._C._TorchDispatchModeKey.FAKE + + # Typically, there is only one fake tensor mode and you test for it by + # doing an isinstance test. However, in some situations, there might be + # TWO fake tensor modes. The canonical example of this is exporting + # a fake model: there is an outer fake mode created by the user, and + # an inner fake mode created by Dynamo. 
The two phase process is required + # because the outer fake mode typically won't have a ShapeEnv, even if + # the user is interested in exporting with dynamic shapes (so the inner + # fake mode will actually have a ShapeEnv and swap in symbolic sizes.) + # + # In this case, it's insufficient to test only one FakeTensor: you need + # to distinguish between our fake tensor and other fake tensors. That's + # what this function does. + def is_our_fake(self, t): + return isinstance(t, FakeTensor) and t.fake_mode is self + + @count + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + # FakeTensorMode should not be set when we're inside of it. + assert ( + torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) is None + ), func + try: + return self.dispatch(func, types, args, kwargs) + except TypeError: + log.exception("fake tensor raised TypeError") + raise + + # No-op if FakeTensorMode is already in use + def __enter__(self): + maybe_prev_fake_mode = torch._C._unset_dispatch_mode(self._mode_key) + if self is not maybe_prev_fake_mode: + self.enter_stack.append((True, maybe_prev_fake_mode)) + return super().__enter__() + else: + # no-op (still need to re-set the fake mode though since we unset it) + torch._C._set_dispatch_mode(self) + self.enter_stack.append((False, None)) + return self + + def __exit__(self, a, b, c): + live, maybe_prev_fake_mode = self.enter_stack.pop() + if live: + out = super().__exit__(a, b, c) + # Re-enable the previous fake mode, if there was one. + if maybe_prev_fake_mode is not None: + torch._C._set_dispatch_mode(maybe_prev_fake_mode) + + def dispatch(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + log.debug("%s %s %s", func, args, kwargs) + + if func == torch.ops.prim.device.default: + # NB: Don't use is_our_fake, just serve the fake information + # as is. Notice we don't use 'self'; we use args[0].fake_mode + # because they may not be the same. It would also be possible + # to return NotImplemented here, in which case the FakeTensor + # handler on args[0] would handle it, but we're being nice and + # short-circuiting quickly. 
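+            # Rough sketch of the behavior being served here (assumes a
+            # CUDA-enabled build; values are illustrative):
+            #
+            #     with FakeTensorMode():
+            #         x = torch.empty(2, device="cuda")
+            #         x.device   # -> the fake device (cuda), not "meta"
+            #
+            # whereas the same query made while in_kernel_invocation is True
+            # reports "meta", mirroring FakeTensor's device property.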
+ assert len(args) == 1 and isinstance(args[0], FakeTensor) + if args[0].fake_mode.in_kernel_invocation: + return torch.device("meta") + else: + return args[0].fake_device + elif func is torch.ops.aten.size.default: + return tuple(int(s) for s in args[0].size()) + elif func is torch.ops.aten.stride.default: + return tuple(int(s) for s in args[0].stride()) + elif func is torch.ops.aten.storage_offset.default: + return int(args[0].storage_offset()) + + if log.getEffectiveLevel() <= logging.DEBUG: + log.debug( + "%sFakeTensorMode.__torch_dispatch__: %s", " " * RECURSION_COUNT, func + ) + incr = IncrementRecursionCount() + + # Some attribute queries that can be serviced directly + # See Note [is_coalesced is dispatched] + if func in { + torch.ops.aten.is_coalesced.default, + torch.ops.aten.dense_dim.default, + torch.ops.aten.sparse_dim.default, + }: + # NB: no_dispatch is ok here too, this func is very simple + with in_kernel_invocation_manager(self): + return func(*args, **kwargs) + + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + + flat_arg_fake_tensors = [ + t for t in flat_args if isinstance(t, FakeTensor) and self.is_our_fake(t) + ] + has_symbolic_sizes = any( + i._has_symbolic_sizes_strides for i in flat_arg_fake_tensors + ) or any(isinstance(a, torch.SymInt) for a in flat_args) + + converter = self.fake_tensor_converter + + def maybe_to_constant(t): + if isinstance(t, FakeTensor) and self.is_our_fake(t): + return t.constant + else: + return t + + # To constant propagate through these functions: + # 1, If this is a lift due to a torch.tensor call, + # the input tensor is guaranteed to be a + # constant, so we keep a copy of the original argument along so + # we can query it if we're asked to item() it at some later point. + # (Note that you can always call a lift fn manually, so we do + # have to check if there are any fake tensors!) + # 2, Some functions that allow Python numbers to bind to Tensors, e.g, torch.div + if (func in self.lift_fns and not flat_arg_fake_tensors) or ( + should_allow_numbers_as_tensors(func) + and not has_symbolic_sizes + and not flat_arg_fake_tensors + ): + assert all( + t.constant is not None for t in flat_arg_fake_tensors + ), f"{func} should not have fake inputs without constants" + const_flat_args = [maybe_to_constant(a) for a in flat_args] + const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec) + out = func(*const_args, **const_kwargs) + if type(out) is torch.Tensor and self.may_turn_const(out): + # NB: not in_kernel_invocation_manager because we're doing real + # compute here + # NB: no_dispatch() here is VERY DANGEROUS (like, segfault + # dangerous) if this is actually a wrapper subclass tensor, + # therefore the exact type test above + with no_dispatch(): + out = out.clone() + return converter(self, out, make_constant=True) + + # See [subclass inputs] below + # NB: If you're seeing a mysterious infinite loop involving fake + # tensor, it might be related to this line. Though I'm not sure + # how you'll know to read this comment, as this line won't show up + # in the stack trace. + unrecognized_types = self.check_for_subclass(flat_args) + if unrecognized_types: + not_implemented_log.debug( + "FakeTensorMode unrecognized subclass(es): %s", unrecognized_types + ) + return NotImplemented + + # if we are in the dispatch mode, we will enter this function even if the inputs + # are not FakeTensors. For now, throw if any non-Fake Tensor inputs + # and just support constructors. 
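+        # Hedged illustration of that policy (with the default
+        # allow_non_fake_inputs=False):
+        #
+        #     real = torch.ones(3)          # plain tensor, created outside the mode
+        #     with FakeTensorMode():
+        #         fake = torch.ones(3)      # fakified by the mode
+        #         fake + real               # raises: non-fake input
+        #
+        # validate_and_convert_non_fake_tensors below rejects ``real``, while
+        # allow_non_fake_inputs=True would convert it on the fly instead.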
+ + # this is generated from torch.tensor(), which does not use the + # dispatcher, to allow wrapper subclasses to wrap the new tensor + if func in self.lift_fns: + assert len(kwargs) == 0 and len(args) == 1, f"{args} {kwargs}" + + if type(args[0]) is torch.Tensor: + return converter(self, args[0]) + + # Recompute flat_arg_fake_tensors here again in case some of the inputs + # were real tensors and fakified in validate_and_convert_non_fake_tensors + (flat_args, flat_arg_fake_tensors) = self.validate_and_convert_non_fake_tensors( + func, converter, flat_args, args_spec + ) + del args, kwargs # Invalidated + + # The current constant handling only support tracing systems + # (aot autograd, torchdynamo) where each operation is run consecutively. + # Because each operation is run in order, we can trace out and support + # sequences like: x = torch.tensor(0.); y = x.add_(1) + # Whenver a constant is written to but with inputs that cannot be evaluated + # statically, such as random_(), we invalidate all constants that alias the input + # We will rely on functionalization for use of fake tensors constants as persistent + # objects on an FX Graph. + + # We dispatch size/stride/numel on the FakeTensor not its constant, so bail on inplace_view + all_constant = all(e.constant is not None for e in flat_arg_fake_tensors) + if ( + torch.Tag.nondeterministic_seeded not in func.tags + and torch.Tag.inplace_view not in func.tags + and all_constant + and len(flat_arg_fake_tensors) != 0 + and not has_symbolic_sizes + ): + const_flat_args = [maybe_to_constant(a) for a in flat_args] + const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec) + + # NB: not in_kernel_invocation_manager(self) as we want to do REAL + # compute + with no_dispatch(): + out = func(*const_args, **const_kwargs) + + flat_out = pytree.tree_leaves(out) + flat_out_tensors = [t for t in flat_out if isinstance(t, torch.Tensor)] + all_constant = all(self.may_turn_const(t) for t in flat_out_tensors) + + if all_constant: + return pytree.tree_map_only( + torch.Tensor, + lambda t: converter(self, t, make_constant=True), + out, + ) + + # we weren't able to turn outputs to constants, + # so invalidate all constants that might be aliases of the outputs + for ten in flat_out_tensors: + converter.invalidate_constant_aliases(ten) + + # we are falling through to running non constant tensors, any input constant that + # is written to must be invalidated + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + self.invalidate_written_to_constants(func, flat_arg_fake_tensors, args, kwargs) + + # Try for fastpath + if has_symbolic_sizes: + fast_impl = get_fast_op_impls().get(func) + if fast_impl is not None: + return fast_impl(self, *args, **kwargs) + + # If there's a Python meta, prefer that over the decomposition + from torch._decomp import meta_table as meta_table + + if func not in meta_table and not self.cpp_meta_supports_symint(func): + from torch._decomp import decomposition_table + + # Prefer Python decompositions over C++ ones + if func in decomposition_table and ( + has_symbolic_sizes + or ( + # TODO: Remove these exclusions, so that we can remove + # this leg entirely + torch_decomp_decompositions(func) + and all(not e.is_sparse for e in flat_arg_fake_tensors) + ) + ): + with self: + return decomposition_table[func](*args, **kwargs) + + with self: + # Decomposes CompositeImplicitAutograd ops + r = func.decompose(*args, **kwargs) + if r is not NotImplemented: + return r + + # prims already wrap FakeTensor inputs to 
FakeTensor outputs + # and do device logic, we dont need do anything but run them + # and ensure that Meta kernels are dispatched to (see) + # Fake Tensor Dispatch Keys + # TODO - we should be use the prim aten impl + # TODO - fix prims complex ops + if ( + "prims::" in func._schema.name + and hasattr(func, "prim_meta_impl") + and not stride_incorrect_op(func) + ): + with self: + return func.prim_meta_impl(*args, **kwargs) + + # Users can register FakeTensor rules for custom operators + # Call them if they exist. + maybe_abstract_impl = torch._library.simple_registry.singleton.find( + func.name() + ).abstract_impl.kernel + if maybe_abstract_impl: + ctx = torch._library.abstract_impl.AbstractImplCtx(self.shape_env, func) + with torch._library.abstract_impl.set_ctx_getter(lambda: ctx), self: + result = maybe_abstract_impl(*args, **kwargs) + return result + + # special handling for funcs registered through `register_op_impl`, + # e.g., manipulating args on constructor calls to construct meta tensors + # and then afterwards wrapping them to a FakeTensor + for run_impl_check, op_impl in op_implementations: + if func in ( + aten._nested_tensor_from_tensor_list.default, + aten._nested_tensor_from_tensor_list.out, + ): + raise UnsupportedOperatorException( + "torch.compile does not support strided NestedTensor" + ) + if run_impl_check(func): + op_impl_out = op_impl(self, func, *args, **kwargs) + if op_impl_out != NotImplemented: + return op_impl_out + + def can_run_unsafe_fallback(func: OpOverload): + if not self.allow_fallback_kernels: + return False + # It's OK to try the fallback for built-in ops (e.g. aten, prims) + # because we control and test these but the fallback leads to unexpected behavior + # in user-defined custom ops + # + # WARNING: DO NOT add any additional namespaces/operators here if they refer to operators + # outside of the pytorch/pytorch library! Any pre-existing things here + # are either in the pytorch/pytorch library or have been grandfathered in. + # The fallback does not always work and MAY CRASH and emit unreadable error messages + # so it should not be allowed by default. + allowed_namespaces = { + "debugprims", + "prims", + "aten", + "xla", + "vision", + "torchtext", + "torchaudio", + "quantized", + } + grandfathered_ops_FIXME = { + "fbgemm::gmm", + } + return ( + func.namespace in allowed_namespaces + or func.name() in grandfathered_ops_FIXME + ) + + def maybe_run_unsafe_fallback(error=None): + # We infer the meta of a custom ops that return None to just + # return None. custom ops are not allowed to mutate metadata + # of their inputs, so this is safe. + from torch._higher_order_ops.auto_functionalize import ( + can_auto_functionalize, + ) + + if can_auto_functionalize(func): + return None + # no meta kernel registered, fallback to kernel for the device + if has_symbolic_sizes or not can_run_unsafe_fallback(func): + raise UnsupportedOperatorException(func) + if error is None: + error = UnsupportedOperatorException(func) + return run_fallback_kernel(self, func, flat_args, args_spec, error) + + # Optimization: If there is no Meta kernel, it takes a surprisingly long + # amount of time to catch the NotImplementedError, so we check it here. 
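+        # (Hedged note: when the unsafe fallback is taken, run_fallback_kernel
+        # materializes zero-filled real tensors in place of our fakes, runs the
+        # real kernel, and re-fakifies fresh outputs, so it is a best-effort
+        # escape hatch for metadata, not a faithful replay of the real data.)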
+ if not torch._C._dispatch_has_computed_kernel_for_dispatch_key( + func.name(), "Meta" + ): + return maybe_run_unsafe_fallback() + + # run kernel registered to meta for func, which include + # python meta registrations, prims, decomps, and c++ meta fns (structured kernels) + # It's possible that the kernel will return NotImplementedError + try: + with in_kernel_invocation_manager(self): + r = func(*args, **kwargs) + except NotImplementedError as not_implemented_error: + return maybe_run_unsafe_fallback(not_implemented_error) + + return self.wrap_meta_outputs_with_default_device_logic( + r, func, flat_args, device=kwargs.get("device") + ) + + # [subclass inputs] + # Suppose we enable fake tensor mode. This means that fake tensor + # mode will run first. But what if we do an operation that + # involves a tensor subclass that will desugar into normal tensor + # operations? Without returning NotImplemented, fake tensor mode will run first, + # decide that a conversion was made (since there was a non fake + # tensor argument), and report an error that converting non + # fake tensor is not supported. What we actually wanted to happen + # was to give the subclass a chance to figure out what it wants to + # before erroring out. Returning NotImplemented here allows this. + def check_for_subclass(self, flat_args): + def check(x): + return ( + isinstance(x, torch.Tensor) + and not isinstance(x, FakeTensor) + and type(x) is not torch.Tensor + and type(x) is not torch.nn.Parameter + ) + + return [type(x) for x in flat_args if check(x)] + + def validate_and_convert_non_fake_tensors( + self, func, converter, flat_args, args_spec + ): + """ + Checks if the list of tensors are fake tensors. + If not, try to convert them to fake tensors. + Returns the original args, kwargs, and a flattened list of (args, kwargs) that are fake tensors. + """ + flat_arg_fake_tensors = [] + + def validate(x): + if not isinstance(x, torch.Tensor): + return x + + nonlocal flat_arg_fake_tensors + if not self.is_our_fake(x): + if torch.Tag.inplace_view in func.tags: + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + raise Exception( + f"Can't call metadata mutating ops on non-Fake Tensor inputs. Found in {render_call(func, args, kwargs)}" + ) + if not self.allow_non_fake_inputs: + if isinstance(x, FakeTensor) and x.fake_mode is not self: + raise AssertionError("Mixing fake modes NYI") + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + raise Exception( + f"Please convert all Tensors to FakeTensors first or instantiate FakeTensorMode " + f"with 'allow_non_fake_inputs'. 
Found in {render_call(func, args, kwargs)}" + ) + + x = converter(self, x) + + flat_arg_fake_tensors.append(x) + return x + + validated_args = [validate(a) for a in flat_args] + return validated_args, flat_arg_fake_tensors + + def wrap_meta_outputs_with_default_device_logic(self, r, func, flat_args, device): + converter = self.fake_tensor_converter + + # Lazily initialized, in case there are no tensor returns + common_device = None + has_scalar_only_inputs = False + + def wrap(e): + nonlocal common_device + nonlocal has_scalar_only_inputs + + if isinstance(e, torch.Tensor) and common_device is None: + ( + common_device, + has_scalar_only_inputs, + ) = FakeTensor._find_common_device(func, flat_args) + + if self.is_our_fake(e): + torch._check( + e.device == common_device, + lambda: f"FakeTensor is wrapped to wrong device, found {e.device}, expected {common_device}", + ) + + if ( + isinstance(e, torch.Tensor) + and not self.is_our_fake(e) + and converter is not None + ): + if has_scalar_only_inputs: + # Under FakeTensorMode, op accepts scalar only inputs, such as aten.add/sub/mul/div, + # returns a real scalar tensor on CPU. See TensorMeta() in _prims/__init__.py for details. + # We thus directly convert real tensor to fake tensor. + return converter(self, e) + else: + return converter.from_meta_and_device( + self, e, device or common_device + ) + else: + return e + + return tree_map(wrap, r) + + def cpp_meta_supports_symint(self, func): + if torch.Tag.view_copy in func.tags: + return True + return func in [ + aten.empty.memory_format, + aten.empty_strided.default, + aten.as_strided_scatter.default, + aten.as_strided.default, + aten.as_strided_.default, + aten.zeros.default, + aten.detach.default, + aten.view_as_real.default, + aten.view_as_complex.default, + aten.set_.source_Storage_storage_offset, + aten._sparse_coo_tensor_with_dims_and_tensors.default, + ] + + @property + def lift_fns(self): + return (aten.lift_fresh.default, aten.lift_fresh_copy.default) + + def may_turn_const(self, t): + return ( + t.numel() <= CONSTANT_NUMEL_LIMIT + and not t.is_sparse + and not self.is_our_fake(t) + and not t.device.type == "meta" + ) + + def invalidate_written_to_constants( + self, func, flat_arg_fake_tensors, args, kwargs + ): + any_constant = any(e.constant is not None for e in flat_arg_fake_tensors) + schema_info = get_schema_info(func) + if any_constant and schema_info.is_mutable(): + _, new_kwargs = normalize_function( + func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True + ) + for k, v in new_kwargs.items(): + k = k if (k != "input" or schema_info.has_argument(k)) else "self" + if ( + self.is_our_fake(v) + and schema_info.is_mutable(k) + and v.constant is not None + ): + self.fake_tensor_converter.invalidate_constant_aliases(v.constant) + + def from_tensor( + self, + tensor, + *, + static_shapes=None, + source: Optional[Source] = None, + symbolic_context=None, + # Setting this flag will force FakeTensorMode to return `None` if attempting to convert a tensor we have not + # seen before. 
+ memoized_only=False, + ): + shape_env = self.shape_env + if static_shapes is None: + static_shapes = self.static_shapes + if static_shapes: + assert ( + symbolic_context is None + ), "cannot set both static_shapes and symbolic_context" + shape_env = None + # see note [Tensor Fakification and Symbol Caching] + if not symbolic_context and not source and not static_shapes: + if tracing_context := torch._guards.TracingContext.try_get(): + if tensor in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[tensor] + source = symbolic_context.tensor_source + return self.fake_tensor_converter( + self, + tensor, + shape_env=shape_env, + source=source, + symbolic_context=symbolic_context, + memoized_only=memoized_only, + ) + + +# NB: returns fake tensors +def run_fallback_kernel( + fake_mode, func, flat_args, args_spec, orig_not_implemented_exception +): + # these should all be supported, just to be safe + # avoid fallback for operators which inplace modify metadata + # because the input fake tensors would be umodified + if torch.Tag.inplace_view in func.tags: + raise orig_not_implemented_exception + + inp_impls = {} + + # Don't use in_kernel_invocation_manager(fake_mode) as we want to do + # REAL compute (not with meta device) + with no_dispatch(): + + def to_real_tensor(e): + if fake_mode.is_our_fake(e): + out = torch.zeros_like(e, device=e.fake_device) + if e.is_sparse: + out._coalesced_(e.is_coalesced()) + inp_impls[id(out)] = e + return out + return e + + flat_args = [to_real_tensor(a) for a in flat_args] + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + + r = func(*args, **kwargs) + + tensor_impls = set() + storages = set() + + for e in flat_args: + if isinstance(e, torch.Tensor): + if not e.is_sparse: + storages.add(e._typed_storage()._cdata) + + # TODO: also check metadata change on inputs + # proper aliasing/metadata relationship between outputs and inputs will + # not be set up, bc of conversion to device, unless we can reuse an + # input impl + + def map_out(e): + if id(e) not in inp_impls and ( + isinstance(e, torch.Tensor) + and not e.is_sparse + and e._typed_storage()._cdata in storages + ): + raise orig_not_implemented_exception + + if isinstance(e, torch.Tensor): + if id(e) in inp_impls: + return inp_impls[id(e)] + else: + return fake_mode.fake_tensor_converter(fake_mode, e) + else: + return e + + return pytree.tree_map(map_out, r) + + +# Just for use to allow copying a module to fake tensors, +# does not apply elsewhere +class FakeCopyMode(TorchFunctionMode): + def __init__(self, fake_mode): + self.fake_mode = fake_mode + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + + # clone will get called in Parameter deepcopy + if func == torch._C.TensorBase.clone: + return func( + self.fake_mode.from_tensor(args[0], static_shapes=True), **kwargs + ) + elif func == torch.Tensor.__deepcopy__: + assert len(args) == 2 and len(kwargs) == 0 + tensor, memo = args + + if id(tensor) in memo: + return memo[id(tensor)] + + out = self.fake_mode.from_tensor(tensor, static_shapes=True) + memo[id(tensor)] = out + return out + else: + with torch._C.DisableTorchFunctionSubclass(): + return func(*args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2df99e709b7e83f06f6cf9780b6cee8aa50d8576 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__init__.py @@ -0,0 +1,19 @@ +import torch + +__all__ = [ + "get_cpu_capability", +] + + +def get_cpu_capability() -> str: + r"""Return cpu capability as a string value. + + Possible values: + - "DEFAULT" + - "VSX" + - "Z VECTOR" + - "NO AVX" + - "AVX2" + - "AVX512" + """ + return torch._C._get_cpu_capability() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f741884a2a317b0721092615dd761e3945417f25 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abcbb5b6f097c506cbb8a06442383cdf8e6eb9f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..204868c29b6c9b469c6611acbe9e66f39c33ff01 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py @@ -0,0 +1,205 @@ +import os +import sys +import warnings +from contextlib import contextmanager + +import torch +from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule + +try: + from torch._C import _cudnn +except ImportError: + _cudnn = None # type: ignore[assignment] + +# Write: +# +# torch.backends.cudnn.enabled = False +# +# to globally disable CuDNN/MIOpen + +__cudnn_version = None + +if _cudnn is not None: + + def _init(): + global __cudnn_version + if __cudnn_version is None: + __cudnn_version = _cudnn.getVersionInt() + runtime_version = _cudnn.getRuntimeVersion() + compile_version = _cudnn.getCompileVersion() + runtime_major, runtime_minor, _ = runtime_version + compile_major, compile_minor, _ = compile_version + # Different major versions are always incompatible + # Starting with cuDNN 7, minor versions are backwards-compatible + # Not sure about MIOpen (ROCm), so always do a strict check + if runtime_major != compile_major: + cudnn_compatible = False + elif runtime_major < 7 or not _cudnn.is_cuda: + cudnn_compatible = runtime_minor == compile_minor + else: + cudnn_compatible = runtime_minor >= compile_minor + if not cudnn_compatible: + if os.environ.get("PYTORCH_SKIP_CUDNN_COMPATIBILITY_CHECK", "0") == "1": + return True + base_error_msg = ( + f"cuDNN version incompatibility: " + f"PyTorch was compiled against {compile_version} " + f"but found runtime version {runtime_version}. " + f"PyTorch already comes bundled with cuDNN. " + f"One option to resolving this error is to ensure PyTorch " + f"can find the bundled cuDNN. " + ) + + if "LD_LIBRARY_PATH" in os.environ: + ld_library_path = os.environ.get("LD_LIBRARY_PATH", "") + if any( + substring in ld_library_path for substring in ["cuda", "cudnn"] + ): + raise RuntimeError( + f"{base_error_msg}" + f"Looks like your LD_LIBRARY_PATH contains incompatible version of cudnn. 
" + f"Please either remove it from the path or install cudnn {compile_version}" + ) + else: + raise RuntimeError( + f"{base_error_msg}" + f"one possibility is that there is a " + f"conflicting cuDNN in LD_LIBRARY_PATH." + ) + else: + raise RuntimeError(base_error_msg) + + return True + +else: + + def _init(): + return False + + +def version(): + """Return the version of cuDNN.""" + if not _init(): + return None + return __cudnn_version + + +CUDNN_TENSOR_DTYPES = { + torch.half, + torch.float, + torch.double, +} + + +def is_available(): + r"""Return a bool indicating if CUDNN is currently available.""" + return torch._C._has_cudnn + + +def is_acceptable(tensor): + if not torch._C._get_cudnn_enabled(): + return False + if tensor.device.type != "cuda" or tensor.dtype not in CUDNN_TENSOR_DTYPES: + return False + if not is_available(): + warnings.warn( + "PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild " + "PyTorch making sure the library is visible to the build system." + ) + return False + if not _init(): + warnings.warn( + "cuDNN/MIOpen library not found. Check your {libpath}".format( + libpath={"darwin": "DYLD_LIBRARY_PATH", "win32": "PATH"}.get( + sys.platform, "LD_LIBRARY_PATH" + ) + ) + ) + return False + return True + + +def set_flags( + _enabled=None, + _benchmark=None, + _benchmark_limit=None, + _deterministic=None, + _allow_tf32=None, +): + orig_flags = ( + torch._C._get_cudnn_enabled(), + torch._C._get_cudnn_benchmark(), + None if not is_available() else torch._C._cuda_get_cudnn_benchmark_limit(), + torch._C._get_cudnn_deterministic(), + torch._C._get_cudnn_allow_tf32(), + ) + if _enabled is not None: + torch._C._set_cudnn_enabled(_enabled) + if _benchmark is not None: + torch._C._set_cudnn_benchmark(_benchmark) + if _benchmark_limit is not None and is_available(): + torch._C._cuda_set_cudnn_benchmark_limit(_benchmark_limit) + if _deterministic is not None: + torch._C._set_cudnn_deterministic(_deterministic) + if _allow_tf32 is not None: + torch._C._set_cudnn_allow_tf32(_allow_tf32) + return orig_flags + + +@contextmanager +def flags( + enabled=False, + benchmark=False, + benchmark_limit=10, + deterministic=False, + allow_tf32=True, +): + with __allow_nonbracketed_mutation(): + orig_flags = set_flags( + enabled, benchmark, benchmark_limit, deterministic, allow_tf32 + ) + try: + yield + finally: + # recover the previous values + with __allow_nonbracketed_mutation(): + set_flags(*orig_flags) + + +# The magic here is to allow us to intercept code like this: +# +# torch.backends..enabled = True + + +class CudnnModule(PropModule): + def __init__(self, m, name): + super().__init__(m, name) + + enabled = ContextProp(torch._C._get_cudnn_enabled, torch._C._set_cudnn_enabled) + deterministic = ContextProp( + torch._C._get_cudnn_deterministic, torch._C._set_cudnn_deterministic + ) + benchmark = ContextProp( + torch._C._get_cudnn_benchmark, torch._C._set_cudnn_benchmark + ) + benchmark_limit = None + if is_available(): + benchmark_limit = ContextProp( + torch._C._cuda_get_cudnn_benchmark_limit, + torch._C._cuda_set_cudnn_benchmark_limit, + ) + allow_tf32 = ContextProp( + torch._C._get_cudnn_allow_tf32, torch._C._set_cudnn_allow_tf32 + ) + + +# This is the sys.modules replacement trick, see +# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273 +sys.modules[__name__] = CudnnModule(sys.modules[__name__], __name__) + +# Add type annotation for the replaced module +enabled: bool +deterministic: bool +benchmark: bool +allow_tf32: bool 
+benchmark_limit: int diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b51e05b0411856b02c690ce6ae29d1fd6b0a547f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88fe8a07b03e57142f281d3c07dca59069e338d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..5ce166c8b28a4749113a77ece86d33c4f04ed8e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py @@ -0,0 +1,62 @@ +import torch.cuda + +try: + from torch._C import _cudnn +except ImportError: + # Uses of all the functions below should be guarded by torch.backends.cudnn.is_available(), + # so it's safe to not emit any checks here. + _cudnn = None # type: ignore[assignment] + + +def get_cudnn_mode(mode): + if mode == "RNN_RELU": + return int(_cudnn.RNNMode.rnn_relu) + elif mode == "RNN_TANH": + return int(_cudnn.RNNMode.rnn_tanh) + elif mode == "LSTM": + return int(_cudnn.RNNMode.lstm) + elif mode == "GRU": + return int(_cudnn.RNNMode.gru) + else: + raise Exception(f"Unknown mode: {mode}") + + +# NB: We don't actually need this class anymore (in fact, we could serialize the +# dropout state for even better reproducibility), but it is kept for backwards +# compatibility for old models. 
+class Unserializable: + def __init__(self, inner): + self.inner = inner + + def get(self): + return self.inner + + def __getstate__(self): + # Note: can't return {}, because python2 won't call __setstate__ + # if the value evaluates to False + return "" + + def __setstate__(self, state): + self.inner = None + + +def init_dropout_state(dropout, train, dropout_seed, dropout_state): + dropout_desc_name = "desc_" + str(torch.cuda.current_device()) + dropout_p = dropout if train else 0 + if (dropout_desc_name not in dropout_state) or ( + dropout_state[dropout_desc_name].get() is None + ): + if dropout_p == 0: + dropout_state[dropout_desc_name] = Unserializable(None) + else: + dropout_state[dropout_desc_name] = Unserializable( + torch._cudnn_init_dropout_state( # type: ignore[call-arg] + dropout_p, + train, + dropout_seed, + self_ty=torch.uint8, + device=torch.device("cuda"), + ) + ) + dropout_ts = dropout_state[dropout_desc_name].get() + return dropout_ts diff --git a/env-llmeval/lib/python3.10/site-packages/torch/bin/torch_shm_manager b/env-llmeval/lib/python3.10/site-packages/torch/bin/torch_shm_manager new file mode 100644 index 0000000000000000000000000000000000000000..d3bacaf1d7ee380536f64f49cde0e1070aa19b9d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/bin/torch_shm_manager differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3af0984d4de0baa84472e1e87bbae33a45f36d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__init__.py @@ -0,0 +1,177 @@ +from torch import _C +from torch._C import _onnx as _C_onnx +from torch._C._onnx import ( + _CAFFE2_ATEN_FALLBACK, + OperatorExportTypes, + TensorProtoDataType, + TrainingMode, +) + +from . import ( # usort:skip. Keep the order instead of sorting lexicographically + _deprecation, + errors, + symbolic_caffe2, + symbolic_helper, + symbolic_opset7, + symbolic_opset8, + symbolic_opset9, + symbolic_opset10, + symbolic_opset11, + symbolic_opset12, + symbolic_opset13, + symbolic_opset14, + symbolic_opset15, + symbolic_opset16, + symbolic_opset17, + symbolic_opset18, + utils, +) + +# TODO(After 1.13 release): Remove the deprecated SymbolicContext +from ._exporter_states import ExportTypes, SymbolicContext +from ._type_utils import JitScalarType +from .errors import CheckerError # Backwards compatibility +from .utils import ( + _optimize_graph, + _run_symbolic_function, + _run_symbolic_method, + export, + export_to_pretty_string, + is_in_onnx_export, + register_custom_op_symbolic, + select_model_mode_for_export, + unregister_custom_op_symbolic, +) + +from ._internal.exporter import ( # usort:skip. 
needs to be last to avoid circular import + DiagnosticOptions, + ExportOptions, + ONNXProgram, + ONNXProgramSerializer, + ONNXRuntimeOptions, + InvalidExportOptionsError, + OnnxExporterError, + OnnxRegistry, + dynamo_export, + enable_fake_mode, +) + +from ._internal.onnxruntime import ( + is_onnxrt_backend_supported, + OrtBackend as _OrtBackend, + OrtBackendOptions as _OrtBackendOptions, + OrtExecutionProvider as _OrtExecutionProvider, +) + +__all__ = [ + # Modules + "symbolic_helper", + "utils", + "errors", + # All opsets + "symbolic_caffe2", + "symbolic_opset7", + "symbolic_opset8", + "symbolic_opset9", + "symbolic_opset10", + "symbolic_opset11", + "symbolic_opset12", + "symbolic_opset13", + "symbolic_opset14", + "symbolic_opset15", + "symbolic_opset16", + "symbolic_opset17", + "symbolic_opset18", + # Enums + "ExportTypes", + "OperatorExportTypes", + "TrainingMode", + "TensorProtoDataType", + "JitScalarType", + # Public functions + "export", + "export_to_pretty_string", + "is_in_onnx_export", + "select_model_mode_for_export", + "register_custom_op_symbolic", + "unregister_custom_op_symbolic", + "disable_log", + "enable_log", + # Errors + "CheckerError", # Backwards compatibility + # Dynamo Exporter + "DiagnosticOptions", + "ExportOptions", + "ONNXProgram", + "ONNXProgramSerializer", + "ONNXRuntimeOptions", + "InvalidExportOptionsError", + "OnnxExporterError", + "OnnxRegistry", + "dynamo_export", + "enable_fake_mode", + # DORT / torch.compile + "is_onnxrt_backend_supported", +] + +# Set namespace for exposed private names +ExportTypes.__module__ = "torch.onnx" +JitScalarType.__module__ = "torch.onnx" +ExportOptions.__module__ = "torch.onnx" +ONNXProgram.__module__ = "torch.onnx" +ONNXProgramSerializer.__module__ = "torch.onnx" +ONNXRuntimeOptions.__module__ = "torch.onnx" +dynamo_export.__module__ = "torch.onnx" +InvalidExportOptionsError.__module__ = "torch.onnx" +OnnxExporterError.__module__ = "torch.onnx" +enable_fake_mode.__module__ = "torch.onnx" +OnnxRegistry.__module__ = "torch.onnx" +DiagnosticOptions.__module__ = "torch.onnx" +is_onnxrt_backend_supported.__module__ = "torch.onnx" +_OrtExecutionProvider.__module__ = "torch.onnx" +_OrtBackendOptions.__module__ = "torch.onnx" +_OrtBackend.__module__ = "torch.onnx" + +producer_name = "pytorch" +producer_version = _C_onnx.PRODUCER_VERSION + + +@_deprecation.deprecated( + since="1.12.0", removed_in="2.0", instructions="use `torch.onnx.export` instead" +) +def _export(*args, **kwargs): + return utils._export(*args, **kwargs) + + +# TODO(justinchuby): Deprecate these logging functions in favor of the new diagnostic module. + +# Returns True iff ONNX logging is turned on. +is_onnx_log_enabled = _C._jit_is_onnx_log_enabled + + +def enable_log() -> None: + r"""Enables ONNX logging.""" + _C._jit_set_onnx_log_enabled(True) + + +def disable_log() -> None: + r"""Disables ONNX logging.""" + _C._jit_set_onnx_log_enabled(False) + + +"""Sets output stream for ONNX logging. + +Args: + stream_name (str, default "stdout"): Only 'stdout' and 'stderr' are supported + as ``stream_name``. +""" +set_log_stream = _C._jit_set_onnx_log_output_stream + + +"""A simple logging facility for ONNX exporter. + +Args: + args: Arguments are converted to string, concatenated together with a newline + character appended to the end, and flushed to output stream. 
+""" +log = _C._jit_onnx_log diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b89d70dc5133f47177edea9399850bfae4c9e225 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f219814dc2b783e3786d0147f5e06464fbf1a5ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a80a638d30ed5bd811d82e9ef56912daf19233b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07ff499fd89380c6d64db90615cf484a0904885e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b4eea343d3e2772c758a1d539d3f9121660859a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6cc4b547e76abd3dec6ea8cdd5a174873add0f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eded8a1a3202b66a0bd7fd913d72e0ffe2a328c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75eec2f9ab702756b15650211129d3d8e54e25d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cfdd9807cdf16b88c7168760c3ae1f0c7d62365 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..debbe70704b6d947a5ae7a838b187f435dc6f4a9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee457620da9e5e2a96386150d8fd44cac2a91d18 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b17eae0039fcd8fc7ef117b32673467f52d200c8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f695869e5b60ebb55a736bb85e2de5619915d7a0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d81977b46f3a55fabcc4208e7c95500af50bf636 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd36695f8e5a0a13f54136f3fc6f99a1f13355af Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef8c880388de1d5dceea947173d79a506f13ff16 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_constants.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..bcdcf4fa6112ab75d1251304ef9928edc212c2c8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_constants.py @@ -0,0 +1,25 @@ +"""Constant values used in ONNX.""" + +ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO" + +ONNX_BASE_OPSET = 9 +ONNX_MIN_OPSET = 7 +ONNX_MAX_OPSET = 19 +ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET = 17 +# ONNX_DEFAULT_OPSET generated by tools/onnx/update_default_opset_version.py +ONNX_DEFAULT_OPSET = 17 +ONNX_CONSTANT_FOLDING_MIN_OPSET = 9 + +PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues" + +INT64_MAX = 9223372036854775807 +INT32_MAX = 2147483647 +INT16_MAX = 32767 +INT8_MAX = 127 +UINT8_MAX = 255 + +INT64_MIN = -9223372036854775808 +INT32_MIN = -2147483648 +INT16_MIN = -32768 +INT8_MIN = -128 +UINT8_MIN = 0 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_deprecation.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_deprecation.py new file mode 100644 index 0000000000000000000000000000000000000000..0fd2cd764fc9511208b83819b60571ebfa508078 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_deprecation.py @@ -0,0 +1,64 @@ +"""Utility for deprecating functions.""" + +import functools +import textwrap +import warnings + + +def deprecated(since: str, removed_in: str, instructions: str): + """Marks functions as deprecated. + + It will result in a warning when the function is called and a note in the + docstring. + + Args: + since: The version when the function was first deprecated. + removed_in: The version when the function will be removed. + instructions: The action users should take. + """ + + def decorator(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + warnings.warn( + f"'{function.__module__}.{function.__name__}' " + f"is deprecated in version {since} and will be " + f"removed in {removed_in}. Please {instructions}.", + category=FutureWarning, + stacklevel=2, + ) + return function(*args, **kwargs) + + # Add a deprecation note to the docstring. + docstring = function.__doc__ or "" + + # Add a note to the docstring. + deprecation_note = textwrap.dedent( + f"""\ + .. deprecated:: {since} + Deprecated and will be removed in version {removed_in}. + Please {instructions}. + """ + ) + + # Split docstring at first occurrence of newline + summary_and_body = docstring.split("\n\n", 1) + + if len(summary_and_body) > 1: + summary, body = summary_and_body + + # Dedent the body. We cannot do this with the presence of the summary because + # the body contains leading whitespaces when the summary does not. 
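+            # Added example (illustrative, not from the original source): a docstring
+            # such as "Do X.\n\n    Details." splits into the summary "Do X." and the
+            # indented body "    Details."; the body is dedented below before the
+            # deprecation note is prepended to the rebuilt docstring.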
+ body = textwrap.dedent(body) + + new_docstring_parts = [deprecation_note, "\n\n", summary, body] + else: + summary = summary_and_body[0] + + new_docstring_parts = [deprecation_note, "\n\n", summary] + + wrapper.__doc__ = "".join(new_docstring_parts) + + return wrapper + + return decorator diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_exporter_states.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_exporter_states.py new file mode 100644 index 0000000000000000000000000000000000000000..dddc92befe6e123e1ec9c12f2b0e4cb7e5eccfa6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_exporter_states.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from typing import Dict + +from torch import _C + + +class ExportTypes: + r"""Specifies how the ONNX model is stored.""" + + PROTOBUF_FILE = "Saves model in the specified protobuf file." + ZIP_ARCHIVE = "Saves model in the specified ZIP file (uncompressed)." + COMPRESSED_ZIP_ARCHIVE = "Saves model in the specified ZIP file (compressed)." + DIRECTORY = "Saves model in the specified folder." + + +class SymbolicContext: + """Extra context for symbolic functions. + + Args: + params_dict (Dict[str, _C.IValue]): Mapping from graph initializer name to IValue. + env (Dict[_C.Value, _C.Value]): Mapping from Torch domain graph Value to ONNX domain graph Value. + cur_node (_C.Node): Current node being converted to ONNX domain. + onnx_block (_C.Block): Current ONNX block that converted nodes are being appended to. + """ + + def __init__( + self, + params_dict: Dict[str, _C.IValue], + env: dict, + cur_node: _C.Node, + onnx_block: _C.Block, + ): + self.params_dict: Dict[str, _C.IValue] = params_dict + self.env: Dict[_C.Value, _C.Value] = env + # Current node that is being converted. + self.cur_node: _C.Node = cur_node + # Current onnx block that converted nodes are being appended to. + self.onnx_block: _C.Block = onnx_block diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_globals.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_globals.py new file mode 100644 index 0000000000000000000000000000000000000000..f827d12be7fbf4b1dbe29c5111f50b22b0a9839b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_globals.py @@ -0,0 +1,85 @@ +"""Globals used internally by the ONNX exporter. + +Do not use this module outside of `torch.onnx` and its tests. + +Be very judicious when adding any new global variables. Do not create new global +variables unless they are absolutely necessary. +""" +import torch._C._onnx as _C_onnx + +# This module should only depend on _constants and nothing else in torch.onnx to keep +# dependency direction clean. +from torch.onnx import _constants + + +class _InternalGlobals: + """Globals used internally by ONNX exporter. + + NOTE: Be very judicious when adding any new variables. Do not create new + global variables unless they are absolutely necessary. 
+ """ + + def __init__(self): + self._export_onnx_opset_version = _constants.ONNX_DEFAULT_OPSET + self._training_mode: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL + self._in_onnx_export: bool = False + # Whether the user's model is training during export + self.export_training: bool = False + self.operator_export_type: _C_onnx.OperatorExportTypes = ( + _C_onnx.OperatorExportTypes.ONNX + ) + self.onnx_shape_inference: bool = True + self._autograd_inlining: bool = True + + @property + def training_mode(self): + """The training mode for the exporter.""" + return self._training_mode + + @training_mode.setter + def training_mode(self, training_mode: _C_onnx.TrainingMode): + if not isinstance(training_mode, _C_onnx.TrainingMode): + raise TypeError( + "training_mode must be of type 'torch.onnx.TrainingMode'. This is " + "likely a bug in torch.onnx." + ) + self._training_mode = training_mode + + @property + def export_onnx_opset_version(self) -> int: + """Opset version used during export.""" + return self._export_onnx_opset_version + + @export_onnx_opset_version.setter + def export_onnx_opset_version(self, value: int): + supported_versions = range( + _constants.ONNX_MIN_OPSET, _constants.ONNX_MAX_OPSET + 1 + ) + if value not in supported_versions: + raise ValueError(f"Unsupported ONNX opset version: {value}") + self._export_onnx_opset_version = value + + @property + def in_onnx_export(self) -> bool: + """Whether it is in the middle of ONNX export.""" + return self._in_onnx_export + + @in_onnx_export.setter + def in_onnx_export(self, value: bool): + if type(value) is not bool: + raise TypeError("in_onnx_export must be a boolean") + self._in_onnx_export = value + + @property + def autograd_inlining(self) -> bool: + """Whether Autograd must be inlined.""" + return self._autograd_inlining + + @autograd_inlining.setter + def autograd_inlining(self, value: bool): + if type(value) is not bool: + raise TypeError("autograd_inlining must be a boolean") + self._autograd_inlining = value + + +GLOBALS = _InternalGlobals() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab63955925cf88d34a9f0ac63a62516653b69cb5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_beartype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_beartype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a73b6b7d9771e4ad8e72b69e378619d079049ad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_beartype.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/exporter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/exporter.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..569db79b60d58b2959e67a8592a6eeadda8b175c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/exporter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb6996a03b9a535f9a061f1e11d229c2d2f6f288 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..933f3b519dbae3ca10ad7332f94bf5553754e183 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..142621077b9e4dce521e2791855e1ab60621e37a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4579a24ae60c7316d48016dfeb4fbfcf9dfd3f54 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2b6dfa95160eacd5921ec444b11f13d591a57cd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py new file mode 100644 index 0000000000000000000000000000000000000000..30c0474f2beec289eaf47b0e905c4db6720a0134 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py @@ -0,0 +1,131 @@ +"""An internal wrapper for the beartype library. + +The module returns a no-op decorator when the beartype library is not installed. +""" +import enum +import functools +import os +import traceback +import typing +import warnings +from types import ModuleType + +try: + import beartype as _beartype_lib # type: ignore[import] + from beartype import roar as _roar # type: ignore[import] + + # Beartype warns when we import from typing because the types are deprecated + # in Python 3.9. 
But there will be a long time until we can move to using + # the native container types for type annotations (when 3.9 is the lowest + # supported version). So we silence the warning. + warnings.filterwarnings( + "ignore", + category=_roar.BeartypeDecorHintPep585DeprecationWarning, + ) + + if _beartype_lib.__version__ == "0.16.0": + # beartype 0.16.0 has a bug that causes it to crash when used with + # PyTorch. See https://github.com/beartype/beartype/issues/282 + warnings.warn("beartype 0.16.0 is not supported. Please upgrade to 0.16.1+.") + _beartype_lib = None # type: ignore[assignment] +except ImportError: + _beartype_lib = None # type: ignore[assignment] +except Exception as e: + # Warn errors that are not import errors (unexpected). + warnings.warn(f"{e}") + _beartype_lib = None # type: ignore[assignment] + + +@enum.unique +class RuntimeTypeCheckState(enum.Enum): + """Runtime type check state.""" + + # Runtime type checking is disabled. + DISABLED = enum.auto() + # Runtime type checking is enabled but warnings are shown only. + WARNINGS = enum.auto() + # Runtime type checking is enabled. + ERRORS = enum.auto() + + +class CallHintViolationWarning(UserWarning): + """Warning raised when a type hint is violated during a function call.""" + + pass + + +def _no_op_decorator(func): + return func + + +def _create_beartype_decorator( + runtime_check_state: RuntimeTypeCheckState, +): + # beartype needs to be imported outside of the function and aliased because + # this module overwrites the name "beartype". + + if runtime_check_state == RuntimeTypeCheckState.DISABLED: + return _no_op_decorator + if _beartype_lib is None: + # If the beartype library is not installed, return a no-op decorator + return _no_op_decorator + + assert isinstance(_beartype_lib, ModuleType) + + if runtime_check_state == RuntimeTypeCheckState.ERRORS: + # Enable runtime type checking which errors on any type hint violation. + return _beartype_lib.beartype + + # Warnings only + def beartype(func): + """Warn on type hint violation.""" + + if "return" in func.__annotations__: + # Remove the return type from the func function's + # annotations so that the beartype decorator does not complain + # about the return type. + return_type = func.__annotations__["return"] + del func.__annotations__["return"] + beartyped = _beartype_lib.beartype(func) + # Restore the return type to the func function's annotations + func.__annotations__["return"] = return_type + else: + beartyped = _beartype_lib.beartype(func) + + @functools.wraps(func) + def _coerce_beartype_exceptions_to_warnings(*args, **kwargs): + try: + return beartyped(*args, **kwargs) + except _roar.BeartypeCallHintParamViolation: + # Fall back to the original function if the beartype hint is violated. + warnings.warn( + traceback.format_exc(), + category=CallHintViolationWarning, + stacklevel=2, + ) + + return func(*args, **kwargs) # noqa: B012 + + return _coerce_beartype_exceptions_to_warnings + + return beartype + + +if typing.TYPE_CHECKING: + # This is a hack to make mypy play nicely with the beartype decorator. 
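+    # Added note (illustrative, not from the original source): under TYPE_CHECKING
+    # `beartype` is a plain identity decorator, so static analyzers see decorated
+    # functions with their original signatures; the runtime decorator is selected
+    # in the else-branch below based on TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK.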
+ def beartype(func): + return func + +else: + _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK = os.getenv( + "TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK" + ) + if _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK == "WARNINGS": + _runtime_type_check_state = RuntimeTypeCheckState.WARNINGS + elif _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK == "DISABLED": + _runtime_type_check_state = RuntimeTypeCheckState.DISABLED + else: + _runtime_type_check_state = RuntimeTypeCheckState.ERRORS + beartype = _create_beartype_decorator(_runtime_type_check_state) + # Make sure that the beartype decorator is enabled whichever path we took. + assert beartype is not None diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cae5b247d5cd5b88511c5be0346170bfce535969 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py @@ -0,0 +1,21 @@ +from ._diagnostic import ( + create_export_diagnostic_context, + diagnose, + engine, + export_context, + ExportDiagnosticEngine, + TorchScriptOnnxExportDiagnostic, +) +from ._rules import rules +from .infra import levels + +__all__ = [ + "TorchScriptOnnxExportDiagnostic", + "ExportDiagnosticEngine", + "rules", + "levels", + "engine", + "export_context", + "create_export_diagnostic_context", + "diagnose", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a81a0f2e29d649307729de1581570d86335339e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61297e182d945281d0d8d0d513e050b30b6d4c44 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_diagnostic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10819f0c4dd48ed9f6ca230f139b8afb2099d437 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__pycache__/_rules.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py new file mode 100644 index 0000000000000000000000000000000000000000..09079d5e9c4a47a43af82c3b36be736eee4f3370 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_diagnostic.py @@ -0,0 +1,212 @@ +"""Diagnostic components for TorchScript based ONNX export, i.e. 
`torch.onnx.export`.""" +from __future__ import annotations + +import contextlib +import gzip +from collections.abc import Generator +from typing import List, Optional + +import torch + +from torch.onnx._internal.diagnostics import infra +from torch.onnx._internal.diagnostics.infra import formatter, sarif +from torch.onnx._internal.diagnostics.infra.sarif import version as sarif_version +from torch.utils import cpp_backtrace + + +def _cpp_call_stack(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack: + """Returns the current C++ call stack. + + This function utilizes `torch.utils.cpp_backtrace` to get the current C++ call stack. + The returned C++ call stack is a concatenated string of the C++ call stack frames. + Each frame is separated by a newline character, in the same format of + r"frame #[0-9]+: (?P.*)". More info at `c10/util/Backtrace.cpp`. + + """ + # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info. + frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split("\n") + frame_messages = [] + for frame in frames: + segments = frame.split(":", 1) + if len(segments) == 2: + frame_messages.append(segments[1].strip()) + else: + frame_messages.append("") + return infra.Stack( + frames=[ + infra.StackFrame(location=infra.Location(message=message)) + for message in frame_messages + ] + ) + + +class TorchScriptOnnxExportDiagnostic(infra.Diagnostic): + """Base class for all export diagnostics. + + This class is used to represent all export diagnostics. It is a subclass of + infra.Diagnostic, and adds additional methods to add more information to the + diagnostic. + """ + + python_call_stack: Optional[infra.Stack] = None + cpp_call_stack: Optional[infra.Stack] = None + + def __init__( + self, + *args, + frames_to_skip: int = 1, + cpp_stack: bool = False, + **kwargs, + ) -> None: + super().__init__(*args, **kwargs) + self.python_call_stack = self.record_python_call_stack( + frames_to_skip=frames_to_skip + ) + if cpp_stack: + self.cpp_call_stack = self.record_cpp_call_stack( + frames_to_skip=frames_to_skip + ) + + def record_cpp_call_stack(self, frames_to_skip: int) -> infra.Stack: + """Records the current C++ call stack in the diagnostic.""" + # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info. + # No need to skip this function because python frame is not recorded + # in cpp call stack. + stack = _cpp_call_stack(frames_to_skip=frames_to_skip) + stack.message = "C++ call stack" + self.with_stack(stack) + return stack + + +class ExportDiagnosticEngine: + """PyTorch ONNX Export diagnostic engine. + + The only purpose of creating this class instead of using `DiagnosticContext` directly + is to provide a background context for `diagnose` calls inside exporter. + + By design, one `torch.onnx.export` call should initialize one diagnostic context. + All `diagnose` calls inside exporter should be made in the context of that export. + However, since diagnostic context is currently being accessed via a global variable, + there is no guarantee that the context is properly initialized. Therefore, we need + to provide a default background context to fallback to, otherwise any invocation of + exporter internals, e.g. unit tests, will fail due to missing diagnostic context. + This can be removed once the pipeline for context to flow through the exporter is + established. 
+ """ + + contexts: List[infra.DiagnosticContext] + _background_context: infra.DiagnosticContext + + def __init__(self) -> None: + self.contexts = [] + self._background_context = infra.DiagnosticContext( + name="torch.onnx", + version=torch.__version__, + ) + + @property + def background_context(self) -> infra.DiagnosticContext: + return self._background_context + + def create_diagnostic_context( + self, + name: str, + version: str, + options: Optional[infra.DiagnosticOptions] = None, + ) -> infra.DiagnosticContext: + """Creates a new diagnostic context. + + Args: + name: The subject name for the diagnostic context. + version: The subject version for the diagnostic context. + options: The options for the diagnostic context. + + Returns: + A new diagnostic context. + """ + if options is None: + options = infra.DiagnosticOptions() + context: infra.DiagnosticContext[infra.Diagnostic] = infra.DiagnosticContext( + name, version, options + ) + self.contexts.append(context) + return context + + def clear(self): + """Clears all diagnostic contexts.""" + self.contexts.clear() + self._background_context.diagnostics.clear() + + def to_json(self) -> str: + return formatter.sarif_to_json(self.sarif_log()) + + def dump(self, file_path: str, compress: bool = False) -> None: + """Dumps the SARIF log to a file.""" + if compress: + with gzip.open(file_path, "wt") as f: + f.write(self.to_json()) + else: + with open(file_path, "w") as f: + f.write(self.to_json()) + + def sarif_log(self): + log = sarif.SarifLog( + version=sarif_version.SARIF_VERSION, + schema_uri=sarif_version.SARIF_SCHEMA_LINK, + runs=[context.sarif() for context in self.contexts], + ) + + log.runs.append(self._background_context.sarif()) + return log + + +engine = ExportDiagnosticEngine() +_context = engine.background_context + + +@contextlib.contextmanager +def create_export_diagnostic_context() -> ( + Generator[infra.DiagnosticContext, None, None] +): + """Create a diagnostic context for export. + + This is a workaround for code robustness since diagnostic context is accessed by + export internals via global variable. See `ExportDiagnosticEngine` for more details. + """ + global _context + assert ( + _context == engine.background_context + ), "Export context is already set. Nested export is not supported." + _context = engine.create_diagnostic_context( + "torch.onnx.export", + torch.__version__, + ) + try: + yield _context + finally: + _context = engine.background_context + + +def diagnose( + rule: infra.Rule, + level: infra.Level, + message: Optional[str] = None, + frames_to_skip: int = 2, + **kwargs, +) -> TorchScriptOnnxExportDiagnostic: + """Creates a diagnostic and record it in the global diagnostic context. + + This is a wrapper around `context.log` that uses the global diagnostic + context. + """ + # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info. 
+ diagnostic = TorchScriptOnnxExportDiagnostic( + rule, level, message, frames_to_skip=frames_to_skip, **kwargs + ) + export_context().log(diagnostic) + return diagnostic + + +def export_context() -> infra.DiagnosticContext: + global _context + return _context diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..4bafac8f7f50072694a175e7a352e8c534f268ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/_rules.py @@ -0,0 +1,634 @@ +""" +GENERATED CODE - DO NOT EDIT DIRECTLY +This file is generated by gen_diagnostics.py. +See tools/onnx/gen_diagnostics.py for more information. + +Diagnostic rules for PyTorch ONNX export. +""" + +import dataclasses +from typing import Tuple + +# flake8: noqa +from torch.onnx._internal.diagnostics import infra + +""" +GENERATED CODE - DO NOT EDIT DIRECTLY +The purpose of generating a class for each rule is to override the `format_message` +method to provide more details in the signature about the format arguments. +""" + + +class _NodeMissingOnnxShapeInference(infra.Rule): + """Node is missing ONNX shape inference.""" + + def format_message(self, op_name) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.' + """ + return self.message_default_template.format(op_name=op_name) + + def format( # type: ignore[override] + self, level: infra.Level, op_name + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.' + """ + return self, level, self.format_message(op_name=op_name) + + +class _MissingCustomSymbolicFunction(infra.Rule): + """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.""" + + def format_message(self, op_name) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.' + """ + return self.message_default_template.format(op_name=op_name) + + def format( # type: ignore[override] + self, level: infra.Level, op_name + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.' + """ + return self, level, self.format_message(op_name=op_name) + + +class _MissingStandardSymbolicFunction(infra.Rule): + """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.""" + + def format_message( # type: ignore[override] + self, op_name, opset_version, issue_url + ) -> str: + """Returns the formatted default message of this Rule. 
+ + Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}." + """ + return self.message_default_template.format( + op_name=op_name, opset_version=opset_version, issue_url=issue_url + ) + + def format( # type: ignore[override] + self, level: infra.Level, op_name, opset_version, issue_url + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}." + """ + return ( + self, + level, + self.format_message( + op_name=op_name, opset_version=opset_version, issue_url=issue_url + ), + ) + + +class _OperatorSupportedInNewerOpsetVersion(infra.Rule): + """Operator is supported in newer opset version.""" + + def format_message( # type: ignore[override] + self, op_name, opset_version, supported_opset_version + ) -> str: + """Returns the formatted default message of this Rule. + + Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version." + """ + return self.message_default_template.format( + op_name=op_name, + opset_version=opset_version, + supported_opset_version=supported_opset_version, + ) + + def format( # type: ignore[override] + self, level: infra.Level, op_name, opset_version, supported_opset_version + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version." + """ + return ( + self, + level, + self.format_message( + op_name=op_name, + opset_version=opset_version, + supported_opset_version=supported_opset_version, + ), + ) + + +class _FxGraphToOnnx(infra.Rule): + """Transforms graph from FX IR to ONNX IR.""" + + def format_message(self, graph_name) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'Transforming FX graph {graph_name} to ONNX graph.' + """ + return self.message_default_template.format(graph_name=graph_name) + + def format( # type: ignore[override] + self, level: infra.Level, graph_name + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'Transforming FX graph {graph_name} to ONNX graph.' + """ + return self, level, self.format_message(graph_name=graph_name) + + +class _FxNodeToOnnx(infra.Rule): + """Transforms an FX node to an ONNX node.""" + + def format_message(self, node_repr) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'Transforming FX node {node_repr} to ONNX node.' + """ + return self.message_default_template.format(node_repr=node_repr) + + def format( # type: ignore[override] + self, level: infra.Level, node_repr + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'Transforming FX node {node_repr} to ONNX node.' 
+ """ + return self, level, self.format_message(node_repr=node_repr) + + +class _FxPass(infra.Rule): + """FX graph transformation during ONNX export before converting from FX IR to ONNX IR.""" + + def format_message(self, pass_name) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'Running {pass_name} pass.' + """ + return self.message_default_template.format(pass_name=pass_name) + + def format( # type: ignore[override] + self, level: infra.Level, pass_name + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'Running {pass_name} pass.' + """ + return self, level, self.format_message(pass_name=pass_name) + + +class _NoSymbolicFunctionForCallFunction(infra.Rule): + """Cannot find symbolic function to convert the "call_function" FX node to ONNX.""" + + def format_message(self, target) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. ' + """ + return self.message_default_template.format(target=target) + + def format( # type: ignore[override] + self, level: infra.Level, target + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. ' + """ + return self, level, self.format_message(target=target) + + +class _UnsupportedFxNodeAnalysis(infra.Rule): + """Result from FX graph analysis to reveal unsupported FX nodes.""" + + def format_message( # type: ignore[override] + self, node_op_to_target_mapping + ) -> str: + """Returns the formatted default message of this Rule. + + Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. ' + """ + return self.message_default_template.format( + node_op_to_target_mapping=node_op_to_target_mapping + ) + + def format( # type: ignore[override] + self, level: infra.Level, node_op_to_target_mapping + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. ' + """ + return ( + self, + level, + self.format_message(node_op_to_target_mapping=node_op_to_target_mapping), + ) + + +class _OpLevelDebugging(infra.Rule): + """Report any op level validation failure in warnings.""" + + def format_message(self, node, symbolic_fn) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.' + """ + return self.message_default_template.format(node=node, symbolic_fn=symbolic_fn) + + def format( # type: ignore[override] + self, level: infra.Level, node, symbolic_fn + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.' + """ + return self, level, self.format_message(node=node, symbolic_fn=symbolic_fn) + + +class _FindOpschemaMatchedSymbolicFunction(infra.Rule): + """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas.""" + + def format_message(self, symbolic_fn, node) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. 
+ + Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.' + """ + return self.message_default_template.format(symbolic_fn=symbolic_fn, node=node) + + def format( # type: ignore[override] + self, level: infra.Level, symbolic_fn, node + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.' + """ + return self, level, self.format_message(symbolic_fn=symbolic_fn, node=node) + + +class _FxNodeInsertTypePromotion(infra.Rule): + """Determine if type promotion is required for the FX node. Insert cast nodes if needed.""" + + def format_message(self, target) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'Performing explicit type promotion for node {target}. ' + """ + return self.message_default_template.format(target=target) + + def format( # type: ignore[override] + self, level: infra.Level, target + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'Performing explicit type promotion for node {target}. ' + """ + return self, level, self.format_message(target=target) + + +class _FindOperatorOverloadsInOnnxRegistry(infra.Rule): + """Find the list of OnnxFunction of the PyTorch operator in onnx registry.""" + + def format_message(self, node) -> str: # type: ignore[override] + """Returns the formatted default message of this Rule. + + Message template: 'Checking if the FX node: {node} is supported in onnx registry.' + """ + return self.message_default_template.format(node=node) + + def format( # type: ignore[override] + self, level: infra.Level, node + ) -> Tuple[infra.Rule, infra.Level, str]: + """Returns a tuple of (Rule, Level, message) for this Rule. + + Message template: 'Checking if the FX node: {node} is supported in onnx registry.' + """ + return self, level, self.format_message(node=node) + + +@dataclasses.dataclass +class _POERules(infra.RuleCollection): + node_missing_onnx_shape_inference: _NodeMissingOnnxShapeInference = dataclasses.field( + default=_NodeMissingOnnxShapeInference.from_sarif( + **{ + "id": "POE0001", + "name": "node-missing-onnx-shape-inference", + "short_description": {"text": "Node is missing ONNX shape inference."}, + "full_description": { + "text": "Node is missing ONNX shape inference. This usually happens when the node is not valid under standard ONNX operator spec.", + "markdown": "Node is missing ONNX shape inference.\nThis usually happens when the node is not valid under standard ONNX operator spec.\n", + }, + "message_strings": { + "default": { + "text": "The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function." + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Node is missing ONNX shape inference.""" + + missing_custom_symbolic_function: _MissingCustomSymbolicFunction = dataclasses.field( + default=_MissingCustomSymbolicFunction.from_sarif( + **{ + "id": "POE0002", + "name": "missing-custom-symbolic-function", + "short_description": { + "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX." 
+ }, + "full_description": { + "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.", + "markdown": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.\n", + }, + "message_strings": { + "default": { + "text": "ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version." + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.""" + + missing_standard_symbolic_function: _MissingStandardSymbolicFunction = dataclasses.field( + default=_MissingStandardSymbolicFunction.from_sarif( + **{ + "id": "POE0003", + "name": "missing-standard-symbolic-function", + "short_description": { + "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX." + }, + "full_description": { + "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.", + "markdown": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.\n", + }, + "message_strings": { + "default": { + "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}." + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.""" + + operator_supported_in_newer_opset_version: _OperatorSupportedInNewerOpsetVersion = dataclasses.field( + default=_OperatorSupportedInNewerOpsetVersion.from_sarif( + **{ + "id": "POE0004", + "name": "operator-supported-in-newer-opset-version", + "short_description": { + "text": "Operator is supported in newer opset version." + }, + "full_description": { + "text": "Operator is supported in newer opset version.", + "markdown": "Operator is supported in newer opset version.\n\nExample:\n```python\ntorch.onnx.export(model, args, ..., opset_version=9)\n```\n", + }, + "message_strings": { + "default": { + "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version." + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Operator is supported in newer opset version.""" + + fx_graph_to_onnx: _FxGraphToOnnx = dataclasses.field( + default=_FxGraphToOnnx.from_sarif( + **{ + "id": "FXE0007", + "name": "fx-graph-to-onnx", + "short_description": { + "text": "Transforms graph from FX IR to ONNX IR." 
+ }, + "full_description": { + "text": "Transforms graph from FX IR to ONNX IR.", + "markdown": "This diagnostic tracks the transformation process from an FX Graph (in FX IR) to an ONNX Graph (in ONNX IR).\n\n## Key Representations:\n\n- **FX Graph**: The graph in FX IR produced by dynamo or symbolic tracing.\n- **ONNX Graph**: The graph in ONNX IR and [operators](https://onnx.ai/onnx/operators/).\n\n## Additional Notes:\n\n- Prior to this transformation step, the FX graph undergoes preprocessing through multiple FX passes.\n To gain insight into these transformations, refer to diagnostic `FXE0010`.\n- To enable a detailed view of the graph transformation in progress within this diagnostic, switch to the DEBUG mode.\n\n - Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n - Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\n- For specific information related to node-level FX to ONNX transformations, explore the diagnostic `FXE0008`.\n", + }, + "message_strings": { + "default": { + "text": "Transforming FX graph {graph_name} to ONNX graph." + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Transforms graph from FX IR to ONNX IR.""" + + fx_node_to_onnx: _FxNodeToOnnx = dataclasses.field( + default=_FxNodeToOnnx.from_sarif( + **{ + "id": "FXE0008", + "name": "fx-node-to-onnx", + "short_description": {"text": "Transforms an FX node to an ONNX node."}, + "full_description": { + "text": "Transforms an FX node to an ONNX node.", + "markdown": "This diagnostic tracks the transformation process from an FX Node to ONNX [Operators](https://onnx.ai/onnx/operators/).\n\nThe process of converting FX Node to ONNX Node involves dealing with six distinct node types:\n 1. `placeholder`: Represents a module input, maps to an ONNX graph input.\n 2. `call_module`: Symbolizes a call to a submodule, maps to an ONNX\n 3. `call_method`: Symbolizes a method call. Not yet implemented.\n 4. `call_function`: Symbolizes a function call. [Core ATen](https://pytorch.org/docs/stable/ir.html#core-aten-ir) is expected\n as the function call target. The mapping from ATen to ONNX is implemented by [ONNXScript torchlib](https://github.com/microsoft/onnxscript/tree/main/onnxscript/function_libs/torch_lib/ops).\n This [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) shows how to write and register a custom symbolic function for call_function FX node.\n 5. `get_attr`: Indicates an attribute access within the current module. Maps to an ONNX graph initializer.\n 6. `output`: Represents the module's output. Maps to an ONNX graph output.\n\nFor a granular understanding of how each node type is transformed, refer to the implementation details in `FxOnnxInterpreter`.\n", + }, + "message_strings": { + "default": { + "text": "Transforming FX node {node_repr} to ONNX node." + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Transforms an FX node to an ONNX node.""" + + fx_pass: _FxPass = dataclasses.field( + default=_FxPass.from_sarif( + **{ + "id": "FXE0010", + "name": "fx-pass", + "short_description": { + "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR." 
+ }, + "full_description": { + "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR.", + "markdown": "This diagnostic tracks the FX passes executed during the ONNX export process prior\nto converting from FX IR (Intermediate Representation) to ONNX IR.\n\nUnder the scope of ONNX export, an FX pass refers to a specific transformation applied to the FX GraphModule.\nThe primary aim of these passes is to streamline the graph into a format that aligns more with the ONNX IR.\nMoreover, these passes work to substitute unsupported FX IR features with those recognized and endorsed by\nONNX IR. Common transformations include, but aren't limited to, decomposition, functionalization and\ntype promotion.\n\nFor those who are interested in a comprehensive log detailing the modifications made during these passes,\nthere are a couple of options:\n\n- Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n- Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\nHowever, it's noteworthy that by default, such detailed logging is turned off. The primary reason being\nits considerable impact on performance.\n\nFor an in-depth understanding of each specific pass, please refer to the directory: torch/onnx/_internal/fx/passes.\n", + }, + "message_strings": {"default": {"text": "Running {pass_name} pass."}}, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """FX graph transformation during ONNX export before converting from FX IR to ONNX IR.""" + + no_symbolic_function_for_call_function: _NoSymbolicFunctionForCallFunction = dataclasses.field( + default=_NoSymbolicFunctionForCallFunction.from_sarif( + **{ + "id": "FXE0011", + "name": "no-symbolic-function-for-call-function", + "short_description": { + "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX.' + }, + "full_description": { + "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX. ', + "markdown": 'This error occurs when the ONNX converter is unable to find a corresponding symbolic function\nto convert a "call_function" node in the input graph to its equivalence in ONNX. The "call_function"\nnode represents a normalized function call in PyTorch, such as "torch.aten.ops.add".\n\nTo resolve this error, you can try one of the following:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) to write and\n register a custom symbolic function for the unsupported call_function FX node.\n\nTODO: Replace above link once docs for `dynamo_export` custom op registration are available.\n', + }, + "message_strings": { + "default": { + "text": 'No symbolic function to convert the "call_function" node {target} to ONNX. ' + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Cannot find symbolic function to convert the "call_function" FX node to ONNX.""" + + unsupported_fx_node_analysis: _UnsupportedFxNodeAnalysis = dataclasses.field( + default=_UnsupportedFxNodeAnalysis.from_sarif( + **{ + "id": "FXE0012", + "name": "unsupported-fx-node-analysis", + "short_description": { + "text": "Result from FX graph analysis to reveal unsupported FX nodes." 
+ }, + "full_description": { + "text": "Result from FX graph analysis to reveal unsupported FX nodes.", + "markdown": "This error indicates that an FX graph contains one or more unsupported nodes. The error message\nis typically accompanied by a list of the unsupported nodes found during analysis.\n\nTo resolve this error, you can try resolving each individual unsupported node error by following\nthe suggestions by its diagnostic. Typically, options include:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) to write and\n register a custom symbolic function for the unsupported call_function FX node.\n", + }, + "message_strings": { + "default": { + "text": "Unsupported FX nodes: {node_op_to_target_mapping}. " + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Result from FX graph analysis to reveal unsupported FX nodes.""" + + op_level_debugging: _OpLevelDebugging = dataclasses.field( + default=_OpLevelDebugging.from_sarif( + **{ + "id": "FXE0013", + "name": "op-level-debugging", + "short_description": { + "text": "Report any op level validation failure in warnings." + }, + "full_description": { + "text": "Report any op level validation failure in warnings.", + "markdown": "This warning message indicates that during op level debugging, certain symbolic functions\nhave failed to match the results of torch ops when using real tensors generated from fake\ntensors. It is important to note that the symbolic functions may not necessarily be\nincorrect, as the validation process is non-deterministic and should only be used as a\nreference.\n\nThere are two categories of warnings that can be triggered:\n\n1. Non-validated operators:\n If the warnings are caused by the following errors, they can be disregarded by users,\n as these errors occur due to the non-deterministic nature of the validation. However,\n it is important to be aware that the operators have not been validated.\n\n - IndexError: Unsupported input arguments of randomized dimensions/indices(INT64).\n - RuntimeError: Unsupported input arguments for torch ops are generated.\n - ValueError: Arguments/keyword arguments do not match the signature of the symbolic function.\n\n2. Potentially wrong torchlib operators:\n If the warnings are triggered by the following error, users should be aware that the symbolic functions\n may be incorrect in dispatching or implementation. In such cases, it is recommended to report\n the issue to the PyTorch-ONNX team, or create/register a custom symbolic function to replace the default one.\n\n - AssertionError: The symbolic function is potentially wrong as the results do not match the results of torch ops.\n - TypeError: The symbolic function is potentially wrong as the opschema doesn't match inputs.\n", + }, + "message_strings": { + "default": { + "text": "FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation." 
+ } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Report any op level validation failure in warnings.""" + + find_opschema_matched_symbolic_function: _FindOpschemaMatchedSymbolicFunction = dataclasses.field( + default=_FindOpschemaMatchedSymbolicFunction.from_sarif( + **{ + "id": "FXE0014", + "name": "find-opschema-matched-symbolic-function", + "short_description": { + "text": "Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas." + }, + "full_description": { + "text": "Find the OnnxFunction that matches the input dtypes by comparing them with their opschemas. A warning will be issued if the matched OnnxFunction is not an exact match.", + "markdown": "When an ATen/Custom operator is registered and needs to be dispatched to an OnnxFunction, the input/attribute\ndtypes of the ATen/Custom operator are compared with the input/attribute dtypes of the OnnxFunction opschemas\nto find a match. However, if a perfect/exact match is not found, the dispatcher will attempt to find\nthe nearest match with the highest number of input/attribute dtypes matching the OnnxFunction opschemas, while\nissuing a warning.\n\nThere are two types of level that can be triggered in this rule:\n\n1. NOTE: A perfect match is found, and no warning is issued.\n2. WARNING: The matched OnnxFunction is not a perfect/exact match.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning,\n as the definition of OnnxFunction schema is usually more stringent.\n2. If there are errors or mismatches in the results, it is recommended to:\n (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n (b) Report the issue to the PyTorch-ONNX team.\n (c) Create/register a custom symbolic function to replace the default one.\n", + }, + "message_strings": { + "default": { + "text": "The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}." + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas.""" + + fx_node_insert_type_promotion: _FxNodeInsertTypePromotion = dataclasses.field( + default=_FxNodeInsertTypePromotion.from_sarif( + **{ + "id": "FXE0015", + "name": "fx-node-insert-type-promotion", + "short_description": { + "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed." + }, + "full_description": { + "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed.", + "markdown": "This diagnostic monitors the node-level type promotion insertion process. In PyTorch, there is an automatic process called implicit type promotion,\nwhere the input types of an operator are promoted to a common type. The determination of the common type is based on the type promotion rule specific to each operator.\nTo learn more about PyTorch's type promotion rules, refer to the [elementwise_dtypes doc](https://github.com/pytorch/pytorch/blob/f044613f78df713fb57f70c608483c9f10ad332e/torch/_prims_common/__init__.py#L1252-L1335)\nand [torch._refs ops](https://github.com/pytorch/pytorch/blob/a475ea4542dfe961c9d097e33ab5041f61c8c17f/torch/_refs/__init__.py#L484).\n\nHowever, implicit type promotion is not supported in ONNX. 
Therefore, to replicate the PyTorch behavior, we need to explicitly insert cast nodes.\nThis diagnostic tracks the process of node-level type promotion insertion.\n\nThe type promotion rules used by this process can be found in `torch/onnx/_internal/fx/passes/type_promotion.py.`\nTo update or add new type promotion rules, please refer to the [Note: Update type promotion rule] section.\n", + }, + "message_strings": { + "default": { + "text": "Performing explicit type promotion for node {target}. " + } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Determine if type promotion is required for the FX node. Insert cast nodes if needed.""" + + find_operator_overloads_in_onnx_registry: _FindOperatorOverloadsInOnnxRegistry = dataclasses.field( + default=_FindOperatorOverloadsInOnnxRegistry.from_sarif( + **{ + "id": "FXE0016", + "name": "find-operator-overloads-in-onnx-registry", + "short_description": { + "text": "Find the list of OnnxFunction of the PyTorch operator in onnx registry." + }, + "full_description": { + "text": "This rule involves finding the list of OnnxFunction for the PyTorch operator overload in the ONNX registry. If the operator overload is not supported but its default overload is, a warning will be issued. If both the operator overload and its default overload are not supported, an error will be issued.", + "markdown": "The operator overload name serves the purpose of verifying whether a PyTorch operator is registered in the ONNX registry.\nIf it's not found, the dispatcher takes a fallback approach and tries to locate the default overload of the PyTorch\noperator in the registry. If even the default overload is absent, it signifies that the operator is officially unsupported.\n\nThere are three types of level that can be triggered in this rule:\n\n1. NOTE: The op overload is supported.\n2. WARNING: The op overload is not supported, but it's default overload is supported.\n3. ERROR: The op overload is not supported, and it's default overload is also not supported.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning.\n2. If there are errors or mismatches in the results, it is recommended to:\n (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n (b) Report the unsupported overload to the PyTorch-ONNX team.\n (c) Create/register a custom symbolic function to replace the default one.\n\nHere are some suggestions based on the ERROR situation:\n\n1. Report the unsupported operator to the PyTorch-ONNX team.\n2. Create/register a custom symbolic function to replace the default one.\n", + }, + "message_strings": { + "default": { + "text": "Checking if the FX node: {node} is supported in onnx registry." 
+ } + }, + "help_uri": None, + "properties": {"deprecated": False, "tags": []}, + } + ), + init=False, + ) + """Find the list of OnnxFunction of the PyTorch operator in onnx registry.""" + + +rules = _POERules() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/exporter.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/exporter.py new file mode 100644 index 0000000000000000000000000000000000000000..5b7a3628116a4019a428a63034714bcf9e7bac3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/exporter.py @@ -0,0 +1,1539 @@ +from __future__ import ( # for onnx.ModelProto (ONNXProgram) and onnxruntime (ONNXRuntimeOptions) + annotations, +) + +import abc + +import contextlib +import dataclasses +import io +import logging +import os + +import warnings +from collections import defaultdict +from typing import ( + Any, + Callable, + Dict, + Final, + List, + Mapping, + Optional, + Protocol, + runtime_checkable, + Sequence, + Set, + Tuple, + TYPE_CHECKING, + TypeVar, + Union, +) + +from typing_extensions import Self + +import torch + +import torch._ops +import torch.export as torch_export +import torch.utils._pytree as pytree +from torch._subclasses import fake_tensor + +from torch.onnx._internal import _beartype, io_adapter +from torch.onnx._internal.diagnostics import infra +from torch.onnx._internal.fx import ( + decomposition_table, + patcher as patcher, + registration, + serialization as fx_serialization, +) + +# We can only import onnx from this module in a type-checking context to ensure that +# 'import torch.onnx' continues to work without having 'onnx' installed. We fully +# 'import onnx' inside of dynamo_export (by way of _assert_dependencies). +if TYPE_CHECKING: + import onnx + import onnxruntime # type: ignore[import] + import onnxscript # type: ignore[import] + from onnxscript.function_libs.torch_lib import ( # type: ignore[import] + registration as torchlib_registry, + ) + + from torch.onnx._internal.fx import diagnostics +else: + try: + # beartype needs this import due to runtime type checking. + # This cannot be normally imported at top level due to + # https://github.com/pytorch/pytorch/issues/103764 + from torch.onnx._internal.fx import diagnostics + except ImportError: + # The error will be handled elsewhere when the exporter is used. + pass + +_DEFAULT_OPSET_VERSION: Final[int] = 18 +"""The default ONNX opset version the exporter will use if one is not specified explicitly +through :class:`ExportOptions`. This should NEVER be accessed outside of this module! Users +should reference :attr:`ExportOptions.opset_version`.""" + +_PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues" +"""The URL to the PyTorch GitHub issues page.""" + +_DEFAULT_FAILED_EXPORT_SARIF_LOG_PATH = "report_dynamo_export.sarif" +"""The default path to write the SARIF log to if the export fails.""" + +_PROTOBUF_SIZE_MAX_LIMIT = 2 * 1024 * 1024 * 1024 +"""The maximum size of a Protobuf file in bytes. This is used to determine whether to +serialize the model with external data or not.""" + +log = logging.getLogger(__name__) + + +DiagnosticOptions = infra.DiagnosticOptions + + +@dataclasses.dataclass +class ONNXFakeContext: + """A dataclass used to store context for model export using FakeTensor. + + This dataclass stores the FakeTensorMode instance used to convert + real tensors and model parameters into fake tensors. 
This :attr:`ONNXFakeContext.fake_mode` is + reused internally during tracing of a :class:`torch.nn.Module` into a FX :class:`GraphModule`. + """ + + fake_mode: fake_tensor.FakeTensorMode + """The fake tensor mode used for tracing model using fake tensors and parameters.""" + + state_dict_paths: Optional[Tuple[Union[str, io.BytesIO]]] = None + """List of paths of files that contain the model :meth:`state_dict`""" + + +class OnnxRegistry: + """Registry for ONNX functions. + + The registry maintains a mapping from qualified names to symbolic functions under a + fixed opset version. It supports registering custom onnx-script functions and for + dispatcher to dispatch calls to the appropriate function. + + """ + + def __init__(self) -> None: + """Initializes the registry""" + + # NOTE: _registry is the registry maps OpNameto a list of ONNXFunctions. It is important + # not to directly modify this variable. Instead, access to it should be done through + # the public methods: register_custom_op, get_ops, and is_registered_op. + self._registry: Dict[ + registration.OpName, List[registration.ONNXFunction] + ] = defaultdict(list) + # FIXME: Avoid importing onnxscript into torch + from onnxscript.function_libs.torch_lib import ( # type: ignore[import] # noqa: F401 + ops, # TODO(titaiwang): get rid of this import + registration, + ) + + # opset_version is unused for now, since torchlib only supports opset18. + # TODO: get opset version from torchlib + self._opset_version = _DEFAULT_OPSET_VERSION + warnings.warn( + f"torch.onnx.dynamo_export only implements opset version {self._opset_version} for now. If you need to use a " + "different opset version, please register them with register_custom_op." + ) + + # Initialize registry from torchlib + self._initiate_registry_from_torchlib(registration.default_registry) + + @property + def opset_version(self) -> int: + """The ONNX opset version the exporter should target. Defaults to the latest + supported ONNX opset version: 18. The default version will increment over time as + ONNX continues to evolve.""" + + return self._opset_version + + # TODO(titaiwang): subject to change if multiple opset_version is supported in torchlib + def _initiate_registry_from_torchlib( + self, torchlib_registry: torchlib_registry.Registry + ): + """Populates the registry with ATen functions from torchlib. + + Args: + torchlib_registry: The torchlib registry to use for populating the registry. + """ + for aten_name, aten_overloads_func in torchlib_registry.items(): + internal_name_instance = registration.OpName.from_qualified_name(aten_name) + for overload_func in aten_overloads_func.overloads: + symbolic_function = registration.ONNXFunction( + onnx_function=overload_func, + op_full_name=internal_name_instance.qualified_name(), + is_custom=False, + is_complex=False, + ) + self._register(internal_name_instance, symbolic_function) + + for complex_func in aten_overloads_func.complex: + symbolic_function = registration.ONNXFunction( + onnx_function=complex_func, + op_full_name=internal_name_instance.qualified_name(), + is_custom=False, + is_complex=True, + ) + self._register(internal_name_instance, symbolic_function) + + @_beartype.beartype + def _register( + self, + internal_qualified_name: registration.OpName, + symbolic_function: registration.ONNXFunction, + ) -> None: + """Registers a ONNXFunction to an operator. + + Args: + internal_qualified_name: The qualified name of the operator to register: OpName. + symbolic_function: The ONNXFunction to register. 
+ """ + self._registry[internal_qualified_name].append(symbolic_function) + + @_beartype.beartype + def register_op( + self, + function: Union["onnxscript.OnnxFunction", "onnxscript.TracedOnnxFunction"], + namespace: str, + op_name: str, + overload: Optional[str] = None, + is_complex: bool = False, + ) -> None: + """Registers a custom operator: torch.ops.... + + Args: + function: The onnx-sctip function to register. + namespace: The namespace of the operator to register. + op_name: The name of the operator to register. + overload: The overload of the operator to register. If it's default overload, + leave it to None. + is_complex: Whether the function is a function that handles complex valued inputs. + + Raises: + ValueError: If the name is not in the form of 'namespace::op'. + """ + internal_name_instance = registration.OpName.from_name_parts( + namespace=namespace, op_name=op_name, overload=overload + ) + symbolic_function = registration.ONNXFunction( + onnx_function=function, + op_full_name=internal_name_instance.qualified_name(), + is_custom=True, + is_complex=is_complex, + ) + self._register(internal_name_instance, symbolic_function) + + @_beartype.beartype + def get_op_functions( + self, namespace: str, op_name: str, overload: Optional[str] = None + ) -> Optional[List[registration.ONNXFunction]]: + """Returns a list of ONNXFunctions for the given op: torch.ops.... + + The list is ordered by the time of registration. The custom operators should be + in the second half of the list. + + Args: + namespace: The namespace of the operator to get. + op_name: The name of the operator to get. + overload: The overload of the operator to get. If it's default overload, + leave it to None. + Returns: + A list of ONNXFunctions corresponding to the given name, or None if + the name is not in the registry. + """ + internal_name_instance = registration.OpName.from_name_parts( + namespace=namespace, op_name=op_name, overload=overload + ) + return self._registry.get(internal_name_instance) + + @_beartype.beartype + def is_registered_op( + self, namespace: str, op_name: str, overload: Optional[str] = None + ) -> bool: + """Returns whether the given op is registered: torch.ops.... + + Args: + namespace: The namespace of the operator to check. + op_name: The name of the operator to check. + overload: The overload of the operator to check. If it's default overload, + leave it to None. + + Returns: + True if the given op is registered, otherwise False. + """ + functions = self.get_op_functions( + namespace=namespace, op_name=op_name, overload=overload + ) + return functions is not None + + @_beartype.beartype + def _all_registered_ops(self) -> Set[str]: + """Returns the set of all registered function names.""" + return { + op_name_class.qualified_name() for op_name_class in self._registry.keys() + } + + +class ExportOptions: + """Options to influence the TorchDynamo ONNX exporter. + + Attributes: + dynamic_shapes: Shape information hint for input/output tensors. + When ``None``, the exporter determines the most compatible setting. + When ``True``, all input shapes are considered dynamic. + When ``False``, all input shapes are considered static. + op_level_debug: Whether to export the model with op-level debug information + diagnostic_options: The diagnostic options for the exporter. + fake_context: The fake context used for symbolic tracing. + onnx_registry: The ONNX registry used to register ATen operators to ONNX functions. 
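+
+    Example (a minimal usage sketch; ``MyModel`` is a hypothetical user-defined ``torch.nn.Module``,
+    not part of this module)::
+
+        export_options = torch.onnx.ExportOptions(dynamic_shapes=True)
+        onnx_program = torch.onnx.dynamo_export(MyModel(), torch.randn(2, 2, 2), export_options=export_options)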
+ """ + + dynamic_shapes: Optional[bool] = None + """Shape information hint for input/output tensors. + + - ``None``: the exporter determines the most compatible setting. + - ``True``: all input shapes are considered dynamic. + - ``False``: all input shapes are considered static. + """ + + op_level_debug: Optional[bool] = None + """When True export the model with op-level debug running ops through ONNX Runtime.""" + + diagnostic_options: DiagnosticOptions + """The diagnostic options for the exporter.""" + + fake_context: Optional[ONNXFakeContext] = None + """The fake context used for symbolic tracing.""" + + onnx_registry: Optional[OnnxRegistry] = None + """The ONNX registry used to register ATen operators to ONNX functions.""" + + @_beartype.beartype + def __init__( + self, + *, + dynamic_shapes: Optional[bool] = None, + op_level_debug: Optional[bool] = None, + fake_context: Optional[ONNXFakeContext] = None, + onnx_registry: Optional[OnnxRegistry] = None, + diagnostic_options: Optional[DiagnosticOptions] = None, + ): + self.dynamic_shapes = dynamic_shapes + self.op_level_debug = op_level_debug + self.fake_context = fake_context + self.onnx_registry = onnx_registry + self.diagnostic_options = diagnostic_options or DiagnosticOptions() + + +class ResolvedExportOptions(ExportOptions): + """Consolidates :class:`ExportOptions` with default values. + All unspecified options from :class:`ExportOptions` are assigned a default value. + This is an internal class and its API may be changed at any time without notice. + """ + + # Public attributes MUST be redefined below without ``Optional[]`` from ``ExportOptions`` + dynamic_shapes: bool + op_level_debug: bool + diagnostic_options: DiagnosticOptions + fake_context: ONNXFakeContext + onnx_registry: OnnxRegistry + + # Private only attributes + decomposition_table: Dict[torch._ops.OpOverload, Callable] + """A dictionary that maps operators to their decomposition functions.""" + + onnxfunction_dispatcher: torch.onnx._internal.fx.onnxfunction_dispatcher.OnnxFunctionDispatcher + """The ONNX dispatcher used to dispatch ATen operators to ONNX functions.""" + + fx_tracer: FXGraphExtractor + """The FXGraphExtractor instance used to extract the FX graph from the model.""" + + diagnostic_context: diagnostics.DiagnosticContext + """The diagnostics context for the export. 
Responsible for recording diagnostics, + logging diagnostics, and generating the SARIF log.""" + + @_beartype.beartype + def __init__( + self, + options: Union[ExportOptions, "ResolvedExportOptions"], + model: Optional[Union[torch.nn.Module, Callable, torch_export.ExportedProgram]] = None, # type: ignore[name-defined] + ): + from torch.onnx._internal.fx import ( # TODO: Prevent circular dep + diagnostics, + dynamo_graph_extractor, + torch_export_graph_extractor, + ) + + if isinstance(options, ResolvedExportOptions): + self.dynamic_shapes = options.dynamic_shapes + self.op_level_debug = options.op_level_debug + self.diagnostic_options = options.diagnostic_options + self.fake_context = options.fake_context + # private + if isinstance(model, torch_export.ExportedProgram) and not isinstance( + options.fx_tracer, torch_export_graph_extractor.TorchExport + ): + message = "'model' of type 'ExportedProgram' is only supported with 'TorchExport' FX Tracer" + e = InvalidExportOptionsError(message) + raise InvalidExportOptionsError( + ONNXProgram._from_failure(e, options.diagnostic_context), message + ) + self.fx_tracer = options.fx_tracer + self.onnx_registry = options.onnx_registry + self.onnxfunction_dispatcher = options.onnxfunction_dispatcher + self.decomposition_table = options.decomposition_table + self.diagnostic_context = options.diagnostic_context + else: + T = TypeVar("T") + + @_beartype.beartype + def resolve(value: Optional[T], fallback: Union[T, Callable[[], T]]) -> T: + if value is not None: + return value + if callable(fallback): + return fallback() + return fallback + + self.dynamic_shapes = resolve(options.dynamic_shapes, False) + + self.diagnostic_options = resolve( + options.diagnostic_options, DiagnosticOptions() + ) + if isinstance(model, torch_export.ExportedProgram): + self.fx_tracer = torch_export_graph_extractor.TorchExport() + else: + self.fx_tracer = dynamo_graph_extractor.DynamoExport() + + self.fake_context = resolve(options.fake_context, None) + self.diagnostic_context = diagnostics.DiagnosticContext( + "torch.onnx.dynamo_export", + torch.__version__, + self.diagnostic_options, + ) + + self.onnx_registry = resolve(options.onnx_registry, OnnxRegistry()) + self.decomposition_table = ( + decomposition_table.create_onnx_friendly_decomposition_table( + self.onnx_registry + ) + ) + + # TODO(titaiwang, bowbao): Better way to annotate `onnxscript` types in diagnostics. + from torch.onnx._internal.fx import onnxfunction_dispatcher + + self.op_level_debug = resolve(options.op_level_debug, False) + self.onnxfunction_dispatcher = ( + onnxfunction_dispatcher.OnnxFunctionDispatcher( + self.onnx_registry, + self.diagnostic_context, + ) + ) + + for key in dir(options): + if not key.startswith("_"): # skip private attributes + assert hasattr(self, key), f"Unresolved option '{key}'" + + +@contextlib.contextmanager +def enable_fake_mode(): + """Enable fake mode for the duration of the context. + + Internally it instantiates a :class:`torch._subclasses.fake_tensor.FakeTensorMode` context manager + that converts user input and model parameters into :class:`torch._subclasses.fake_tensor.FakeTensor`. + + A :class:`torch._subclasses.fake_tensor.FakeTensor` + is a :class:`torch.Tensor` with the ability to run PyTorch code without having to + actually do computation through tensors allocated on a ``meta`` device. Because + there is no actual data being allocated on the device, this API allows for + exporting large models without the actual memory footprint needed for executing it. 
+ + It is highly recommended to enable fake mode when exporting models that + are too large to fit into memory. + + Returns: + A :class:`ONNXFakeContext` object that must be passed to :func:`dynamo_export` + through the :attr:`ExportOptions.fake_context` argument. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> import torch.onnx + >>> class MyModel(torch.nn.Module): # Dummy model + ... def __init__(self) -> None: + ... super().__init__() + ... self.linear = torch.nn.Linear(2, 2) + ... def forward(self, x): + ... out = self.linear(x) + ... return out + >>> with torch.onnx.enable_fake_mode() as fake_context: + ... my_nn_module = MyModel() + ... arg1 = torch.randn(2, 2, 2) # positional input 1 + >>> export_options = torch.onnx.ExportOptions(fake_context=fake_context) + >>> onnx_program = torch.onnx.dynamo_export( + ... my_nn_module, + ... arg1, + ... export_options=export_options + ... ) + >>> # Saving model WITHOUT initializers + >>> onnx_program.save("my_model_without_initializers.onnx") + >>> # Saving model WITH initializers + >>> onnx_program.save("my_model_with_initializers.onnx", model_state_dict=MyModel().state_dict()) + + .. warning:: + This API is experimental and is *NOT* backward-compatible. + + """ + from torch._subclasses import fake_tensor + from torch.fx.experimental.symbolic_shapes import ShapeEnv + + # This overrides the internal `FakeTensorMode` instance created by `torch._dynamo.export`[1]. + # It is a good idea to keep them in sync (constructor args) to maintain the same default behavior + # [1] `torch/_dynamo/output_graph.py::InstructionTranslator::OutputGraph.__init__` + # Mixed fake/real tensors are only allowed when `torch.onnx.dynamo_export` is not called within `FakeTensorMode` + # This is needed because models can create new parameters during `forward(self, *args, **kwargs)` run + fake_mode = fake_tensor.FakeTensorMode( + allow_non_fake_inputs=not torch._guards.detect_fake_mode(), + shape_env=ShapeEnv( + allow_scalar_outputs=False, allow_dynamic_output_shape_ops=False + ), + ) + # The patcher is needed for when user calls `fake_model.load_state_dict(...)` within fake mode + patcher_context = patcher.ONNXTorchPatcher() + fake_context = ONNXFakeContext(fake_mode=fake_mode) + with fake_mode, patcher_context: + yield fake_context + fake_context.state_dict_paths = tuple( + patcher_context.paths, + ) # type: ignore[assignment] + + +@runtime_checkable +class ONNXProgramSerializer(Protocol): + """Protocol for serializing an ONNX graph into a specific format (e.g. Protobuf). + Note that this is an advanced usage scenario.""" + + def serialize( + self, onnx_program: ONNXProgram, destination: io.BufferedIOBase + ) -> None: + """Protocol method that must be implemented for serialization. + + Args: + onnx_program: Represents the in-memory exported ONNX model + destination: A binary IO stream or pre-allocated buffer into which + the serialized model should be written. + + Example: + + A simple serializer that writes the exported :py:obj:`onnx.ModelProto` in Protobuf + format to ``destination``: + + :: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import io + >>> import torch + >>> import torch.onnx + >>> class MyModel(torch.nn.Module): # Dummy model + ... def __init__(self) -> None: + ... super().__init__() + ... self.linear = torch.nn.Linear(2, 2) + ... def forward(self, x): + ... out = self.linear(x) + ... return out + >>> class ProtobufONNXProgramSerializer: + ... def serialize( + ... 
self, onnx_program: torch.onnx.ONNXProgram, destination: io.BufferedIOBase + ... ) -> None: + ... destination.write(onnx_program.model_proto.SerializeToString()) + >>> model = MyModel() + >>> arg1 = torch.randn(2, 2, 2) # positional input 1 + >>> torch.onnx.dynamo_export(model, arg1).save( + ... destination="exported_model.onnx", + ... serializer=ProtobufONNXProgramSerializer(), + ... ) + """ + ... + + +class ProtobufONNXProgramSerializer: + """Serializes ONNX graph as Protobuf.""" + + @_beartype.beartype + def serialize( + self, onnx_program: ONNXProgram, destination: io.BufferedIOBase + ) -> None: + import onnx + + if not isinstance(onnx_program.model_proto, onnx.ModelProto): # type: ignore[attr-defined] + raise ValueError("onnx_program.ModelProto is not an onnx.ModelProto") + destination.write(onnx_program.model_proto.SerializeToString()) + + +class LargeProtobufONNXProgramSerializer: + """Serializes ONNX graph as Protobuf. + + Fallback to serializing as Protobuf with external data for models larger than 2GB. + """ + + _destination_path: Final[str] + + def __init__(self, destination_path: str): + self._destination_path = destination_path + + @_beartype.beartype + def serialize( + self, onnx_program: ONNXProgram, destination: io.BufferedIOBase + ) -> None: + """`destination` is ignored. The model is saved to `self._destination_path` instead.""" + import onnx + + if onnx_program.model_proto.ByteSize() < _PROTOBUF_SIZE_MAX_LIMIT: + onnx.save_model(onnx_program.model_proto, self._destination_path) # type: ignore[attr-defined] + else: + # ValueError: Message onnx.ModelProto exceeds maximum protobuf size of 2GB + # Fallback to serializing the model with external data. + onnx.save_model( # type: ignore[attr-defined] + onnx_program.model_proto, + self._destination_path, + save_as_external_data=True, + all_tensors_to_one_file=True, + ) + + +class ONNXRuntimeOptions: + """Options to influence the execution of the ONNX model through ONNX Runtime. + + Attributes: + session_options: ONNX Runtime session options. + execution_providers: ONNX Runtime execution providers to use during model execution. + execution_provider_options: ONNX Runtime execution provider options. + """ + + session_options: Optional[Sequence["onnxruntime.SessionOptions"]] = None + """ONNX Runtime session options.""" + + execution_providers: Optional[ + Sequence[Union[str, Tuple[str, Dict[Any, Any]]]] + ] = None + """ONNX Runtime execution providers to use during model execution.""" + + execution_provider_options: Optional[Sequence[Dict[Any, Any]]] = None + """ONNX Runtime execution provider options.""" + + @_beartype.beartype + def __init__( + self, + *, + session_options: Optional[Sequence["onnxruntime.SessionOptions"]] = None, + execution_providers: Optional[ + Sequence[Union[str, Tuple[str, Dict[Any, Any]]]] + ] = None, + execution_provider_options: Optional[Sequence[Dict[Any, Any]]] = None, + ): + self.session_options = session_options + self.execution_providers = execution_providers + self.execution_provider_options = execution_provider_options + + +class ONNXProgram: + """An in-memory representation of a PyTorch model that has been exported to ONNX. + + Args: + model_proto: The exported ONNX model as an :py:obj:`onnx.ModelProto`. + input_adapter: The input adapter used to convert PyTorch inputs into ONNX inputs. + output_adapter: The output adapter used to convert PyTorch outputs into ONNX outputs. + diagnostic_context: Context object for the SARIF diagnostic system responsible for logging errors and metadata. 
+ fake_context: The fake context used for symbolic tracing. + export_exception: The exception that occurred during export, if any. + model_signature: The model signature for the exported ONNX graph. + """ + + _model_proto: Final[onnx.ModelProto] # type: ignore[name-defined] + _input_adapter: Final[io_adapter.InputAdapter] + _output_adapter: Final[io_adapter.OutputAdapter] + _diagnostic_context: Final[diagnostics.DiagnosticContext] + _fake_context: Final[Optional[ONNXFakeContext]] + _export_exception: Final[Optional[Exception]] + _model_signature: Final[Optional[torch.export.ExportGraphSignature]] + _model_torch: Final[ + Optional[Union[torch.nn.Module, Callable, torch_export.ExportedProgram]] + ] + + @_beartype.beartype + def __init__( + self, + model_proto: onnx.ModelProto, # type: ignore[name-defined] + input_adapter: io_adapter.InputAdapter, + output_adapter: io_adapter.OutputAdapter, + diagnostic_context: diagnostics.DiagnosticContext, + *, + fake_context: Optional[ONNXFakeContext] = None, + export_exception: Optional[Exception] = None, + model_signature: Optional[torch.export.ExportGraphSignature] = None, + model_torch: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ): + self._model_proto = model_proto + self._model_signature = model_signature + self._model_torch = model_torch + self._input_adapter = input_adapter + self._output_adapter = output_adapter + self._diagnostic_context = diagnostic_context + self._fake_context = fake_context + self._export_exception = export_exception + + def __call__( + self, + *args: Any, + model_with_state_dict: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + options: Optional[ONNXRuntimeOptions] = None, + **kwargs: Any, + ) -> Any: + """Runs the ONNX model using ONNX Runtime + + Args: + args: The positional inputs to the model. + kwargs: The keyword inputs to the model. + model_with_state_dict: The PyTorch model to fetch state from. + Required when :func:`enable_fake_mode` is used to extract real initializers as needed by the ONNX graph. + options: The options to use for running the model with ONNX Runtime. + + Returns: + The model output as computed by ONNX Runtime + """ + import onnxruntime # type: ignore[import] + + # model specified by the user has precedence, when specified + model_with_state_dict = model_with_state_dict or self._model_torch + + onnx_input = self.adapt_torch_inputs_to_onnx( + *args, model_with_state_dict=model_with_state_dict, **kwargs + ) + options = options or ONNXRuntimeOptions() + providers = options.execution_providers or onnxruntime.get_available_providers() + onnx_model = self.model_proto.SerializeToString() + ort_session = onnxruntime.InferenceSession(onnx_model, providers=providers) + + onnxruntime_input = { + k.name: v.numpy(force=True) + for k, v in zip(ort_session.get_inputs(), onnx_input) + } + + return ort_session.run(None, onnxruntime_input) + + @property + def model_proto(self) -> onnx.ModelProto: # type: ignore[name-defined] + """The exported ONNX model as an :py:obj:`onnx.ModelProto`.""" + + if self._export_exception is not None: + raise self._export_exception + return self._model_proto + + @property + def model_signature(self) -> Optional[torch.export.ExportGraphSignature]: + """The model signature for the exported ONNX graph. + + This information is relevant because ONNX specification often differs from PyTorch's, resulting + in a ONNX graph with input and output schema different from the actual PyTorch model implementation. 
+ By using the model signature, the users can understand the inputs and outputs differences + and properly execute the model in ONNX Runtime. + + NOTE: Model signature is only available when the ONNX graph was exported from a + :class:`torch.export.ExportedProgram` object. + + NOTE: Any transformation done to the model that changes the model signature must be accompanied + by updates to this model signature as well through :class:`InputAdaptStep` and/or :class:`OutputAdaptStep`. + + Example: + + The following model produces different sets of inputs and outputs. + The first 4 inputs are model parameters (namely conv1.weight, conv2.weight, fc1.weight, fc2.weight), + and the next 2 inputs are registered buffers (namely my_buffer2, my_buffer1) and finally + the last 2 inputs are user inputs (namely x and b). + The first output is a buffer mutation (namely my_buffer2) and the last output is the actual model output. + + >>> class CustomModule(torch.nn.Module): + ... def __init__(self): + ... super().__init__() + ... self.my_parameter = torch.nn.Parameter(torch.tensor(2.0)) + ... self.register_buffer("my_buffer1", torch.tensor(3.0)) + ... self.register_buffer("my_buffer2", torch.tensor(4.0)) + ... self.conv1 = torch.nn.Conv2d(1, 32, 3, 1, bias=False) + ... self.conv2 = torch.nn.Conv2d(32, 64, 3, 1, bias=False) + ... self.fc1 = torch.nn.Linear(9216, 128, bias=False) + ... self.fc2 = torch.nn.Linear(128, 10, bias=False) + ... def forward(self, x, b): + ... tensor_x = self.conv1(x) + ... tensor_x = torch.nn.functional.sigmoid(tensor_x) + ... tensor_x = self.conv2(tensor_x) + ... tensor_x = torch.nn.functional.sigmoid(tensor_x) + ... tensor_x = torch.nn.functional.max_pool2d(tensor_x, 2) + ... tensor_x = torch.flatten(tensor_x, 1) + ... tensor_x = self.fc1(tensor_x) + ... tensor_x = torch.nn.functional.sigmoid(tensor_x) + ... tensor_x = self.fc2(tensor_x) + ... output = torch.nn.functional.log_softmax(tensor_x, dim=1) + ... ( + ... self.my_buffer2.add_(1.0) + self.my_buffer1 + ... ) # Mutate buffer through in-place addition + ... 
return output + >>> inputs = (torch.rand((64, 1, 28, 28), dtype=torch.float32), torch.randn(3)) + >>> exported_program = torch.export.export(CustomModule(), args=inputs) + >>> onnx_program = torch.onnx.dynamo_export(exported_program, *inputs) + >>> print(onnx_program.model_signature) + ExportGraphSignature( + input_specs=[ + InputSpec(kind=, arg=TensorArgument(name='arg0_1'), target='conv1.weight'), + InputSpec(kind=, arg=TensorArgument(name='arg1_1'), target='conv2.weight'), + InputSpec(kind=, arg=TensorArgument(name='arg2_1'), target='fc1.weight'), + InputSpec(kind=, arg=TensorArgument(name='arg3_1'), target='fc2.weight'), + InputSpec(kind=, arg=TensorArgument(name='arg4_1'), target='my_buffer2'), + InputSpec(kind=, arg=TensorArgument(name='arg5_1'), target='my_buffer1'), + InputSpec(kind=, arg=TensorArgument(name='l_x_'), target=None), + InputSpec(kind=, arg=TensorArgument(name='arg1'), target=None) + ], + output_specs=[ + OutputSpec(kind=, arg=TensorArgument(name='add'), target='my_buffer2'), + OutputSpec(kind=, arg=TensorArgument(name='_log_softmax'), target=None) + ] + ) + """ + + return self._model_signature + + @property + def diagnostic_context(self) -> diagnostics.DiagnosticContext: + """The diagnostic context associated with the export.""" + + return self._diagnostic_context + + @property + def fake_context(self) -> Optional[ONNXFakeContext]: + """The fake context associated with the export.""" + + return self._fake_context + + @_beartype.beartype + def adapt_torch_inputs_to_onnx( + self, + *model_args, + model_with_state_dict: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + **model_kwargs, + ) -> Sequence[Union[torch.Tensor, int, float, bool]]: + """Converts the PyTorch model inputs to exported ONNX model inputs format. + + Due to design differences, input/output format between PyTorch model and exported + ONNX model are often not the same. E.g., None is allowed for PyTorch model, but are + not supported by ONNX. Nested constructs of tensors are allowed for PyTorch model, + but only flattened tensors are supported by ONNX, etc. + + The actual adapting steps are associated with each individual export. It + depends on the PyTorch model, the particular set of model_args and model_kwargs + used for the export, and export options. + + This method replays the adapting steps recorded during export. + + Args: + model_args: The PyTorch model inputs. + model_with_state_dict: The PyTorch model to get extra state from. + If not specified, the model used during export is used. + Required when :func:`enable_fake_mode` is used to extract real initializers as needed by the ONNX graph. + model_kwargs: The PyTorch model keyword inputs. + + Returns: + A sequence of tensors converted from PyTorch model inputs. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> import torch.onnx + >>> from typing import Dict, Tuple + >>> def func_nested_input( + ... x_dict: Dict[str, torch.Tensor], + ... y_tuple: Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]] + ... ): + ... if "a" in x_dict: + ... x = x_dict["a"] + ... elif "b" in x_dict: + ... x = x_dict["b"] + ... else: + ... x = torch.randn(3) + ... + ... y1, (y2, y3) = y_tuple + ... + ... 
return x + y1 + y2 + y3 + >>> x_dict = {"a": torch.tensor(1.)} + >>> y_tuple = (torch.tensor(2.), (torch.tensor(3.), torch.tensor(4.))) + >>> onnx_program = torch.onnx.dynamo_export(func_nested_input, x_dict, y_tuple) + >>> print(x_dict, y_tuple) + {'a': tensor(1.)} (tensor(2.), (tensor(3.), tensor(4.))) + >>> print(onnx_program.adapt_torch_inputs_to_onnx(x_dict, y_tuple, model_with_state_dict=func_nested_input)) + (tensor(1.), tensor(2.), tensor(3.), tensor(4.)) + + .. warning:: + This API is experimental and is *NOT* backward-compatible. + + """ + # model specified by the user has precedence, when specified + model_with_state_dict = model_with_state_dict or self._model_torch + assert ( + model_with_state_dict is not None + ), "model_with_state_dict must be specified." + return self._input_adapter.apply( + *model_args, model=model_with_state_dict, **model_kwargs + ) + + @_beartype.beartype + def adapt_torch_outputs_to_onnx( + self, + model_outputs: Any, + model_with_state_dict: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Sequence[Union[torch.Tensor, int, float, bool]]: + """Converts the PyTorch model outputs to exported ONNX model outputs format. + + Due to design differences, input/output format between PyTorch model and exported + ONNX model are often not the same. E.g., None is allowed for PyTorch model, but are + not supported by ONNX. Nested constructs of tensors are allowed for PyTorch model, + but only flattened tensors are supported by ONNX, etc. + + The actual adapting steps are associated with each individual export. It + depends on the PyTorch model, the particular set of model_args and model_kwargs + used for the export, and export options. + + This method replays the adapting steps recorded during export. + + Args: + model: The PyTorch model to get extra state from. + model_outputs: The PyTorch model outputs. + model_with_state_dict: The PyTorch model to get extra state from. + If not specified, the model used during export is used. + Required when :func:`enable_fake_mode` is used to extract real initializers as needed by the ONNX graph. + + Returns: + PyTorch model outputs in exported ONNX model outputs format. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> import torch.onnx + >>> def func_returning_tuples(x, y, z): + ... x = x + y + ... y = y + z + ... z = x + y + ... return (x, (y, z)) + >>> x = torch.tensor(1.) + >>> y = torch.tensor(2.) + >>> z = torch.tensor(3.) + >>> onnx_program = torch.onnx.dynamo_export(func_returning_tuples, x, y, z) + >>> pt_output = func_returning_tuples(x, y, z) + >>> print(pt_output) + (tensor(3.), (tensor(5.), tensor(8.))) + >>> print(onnx_program.adapt_torch_outputs_to_onnx(pt_output, model_with_state_dict=func_returning_tuples)) + [tensor(3.), tensor(5.), tensor(8.)] + + .. warning:: + This API is experimental and is *NOT* backward-compatible. + + """ + # model specified by the user has precedence, when specified + model_with_state_dict = model_with_state_dict or self._model_torch + assert ( + model_with_state_dict is not None + ), "model_with_state_dict must be specified." 
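+        # Replay the output-adaptation steps recorded during export to flatten the
+        # PyTorch outputs into the output format of the exported ONNX model.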
+        return self._output_adapter.apply(model_outputs, model=model_with_state_dict)
+
+    @_beartype.beartype
+    def save(
+        self,
+        destination: Union[str, io.BufferedIOBase],
+        *,
+        model_state_dict: Optional[Union[Dict[str, Any], str]] = None,
+        serializer: Optional[ONNXProgramSerializer] = None,
+    ) -> None:
+        """Saves the in-memory ONNX model to ``destination`` using the specified ``serializer``.
+
+        Args:
+            destination: The destination to save the ONNX model. It can be either a string or a file-like object.
+                When used with ``model_state_dict``, it must be a string with a full path to the destination.
+                In that case, besides saving the ONNX model, a folder with "_initializers" suffix (without extension)
+                will be created to store each initializer of the ONNX model in a separate file. For example, if the
+                destination is "/path/model.onnx", the initializers will be saved in "/path/model_initializers/" folder.
+            model_state_dict: The state_dict of the PyTorch model containing all weights.
+                It can be either a dict as returned by :meth:`model.state_dict`, or a string with a path to a checkpoint file.
+                Required when :func:`enable_fake_mode` is used but real initializers are needed on the ONNX graph.
+            serializer: The serializer to use. If not specified, the model will be serialized as Protobuf.
+        """
+
+        if serializer is None:
+            if isinstance(destination, str):
+                serializer = LargeProtobufONNXProgramSerializer(destination)
+            else:
+                serializer = ProtobufONNXProgramSerializer()
+
+        # Add initializers when symbolic tracing is enabled
+        _model_state_dict_files: List[Union[str, io.BytesIO]] = []
+        if model_state_dict is not None:
+            if isinstance(model_state_dict, dict):
+                model_state_dict_file = io.BytesIO()
+                torch.save(model_state_dict, model_state_dict_file)
+                model_state_dict_file.seek(0)
+                _model_state_dict_files.append(model_state_dict_file)
+            else:
+                assert isinstance(
+                    model_state_dict, str
+                ), "model_state_dict must be a path to the model's state_dict or the actual state_dict"
+                _model_state_dict_files.append(model_state_dict)
+        elif self._fake_context and self._fake_context.state_dict_paths:
+            # Load state from previous model.load_state_dict() call within enable_fake_mode() context
+            for path in self._fake_context.state_dict_paths:
+                if path in _model_state_dict_files:
+                    # ignore duplicate
+                    continue
+                try:
+                    extra_state_dict = torch.load(path)
+                    extra_state_dict_file = io.BytesIO()
+                    torch.save(extra_state_dict, extra_state_dict_file)
+                    extra_state_dict_file.seek(0)
+                    _model_state_dict_files.append(extra_state_dict_file)
+                except FileNotFoundError:
+                    # It is ok to ignore transient state_dict file created within context manager
+                    pass
+
+        if _model_state_dict_files:
+            if not isinstance(destination, str):
+                raise RuntimeError(
+                    "`destination` must be a string with a path when `model_state_dict` is specified."
+                )
+            destination_path, destination_filename = os.path.split(destination)
+            onnx_model_location = destination_filename
+            onnx_initializer_location = (
+                destination_filename.split(".")[0] + "_initializers"
+            )
+            # TODO: Should this be part of the serializer?
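+            # Save the ONNX model to `onnx_model_location` and write the collected state_dict
+            # tensors as external data under the `onnx_initializer_location` folder computed above.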
+ fx_serialization.save_model_with_external_data( + destination_path, + onnx_model_location, + onnx_initializer_location, + tuple(_model_state_dict_files), + self.model_proto, + ) + else: + if isinstance(destination, str): + with open(destination, "wb") as f: + serializer.serialize(self, f) + else: + try: + serializer.serialize(self, destination) + except ValueError as exc: + raise ValueError( + "'destination' should be provided as a path-like string when saving a model larger than 2GB. " + "External tensor data will be saved alongside the model on disk." + ) from exc + + @_beartype.beartype + def save_diagnostics(self, destination: str) -> None: + """Saves the export diagnostics as a SARIF log to the specified destination path. + + Args: + destination: The destination to save the diagnostics SARIF log. + It must have a `.sarif` extension. + + Raises: + ValueError: If the destination path does not end with `.sarif` extension. + """ + if not destination.endswith(".sarif"): + message = f"'destination' must have a .sarif extension, got {destination}" + log.fatal(message) + raise ValueError(message) + + self.diagnostic_context.dump(destination) + + @classmethod + def _from_failure( + cls, + export_exception: Exception, + diagnostic_context: diagnostics.DiagnosticContext, + ) -> Self: + """ + Creates an instance of :class:`ONNXProgram` when the export process encounters a failure. + + In case of a failed export, this method is used to encapsulate the exception + and associated diagnostic context within an :class:`ONNXProgram` instance for + easier handling and debugging. + + Args: + export_exception: The exception raised during the export process. + diagnostic_context: The context associated with diagnostics during export. + + Returns: + An instance of :class:`ONNXProgram` representing the failed ONNX program. + """ + # Defer `import onnx` out of `import torch` path + # https://github.com/pytorch/pytorch/issues/103764 + import onnx + + # TODO: Should we populate ONNXProgram with more info, such _model_torch for easier debug? + return ONNXProgram( + onnx.ModelProto(), # type: ignore[attr-defined] + io_adapter.InputAdapter(), + io_adapter.OutputAdapter(), + diagnostic_context, + export_exception=export_exception, + ) + + +class FXGraphExtractor(abc.ABC): + """Abstract interface for FX graph extractor engines. + This class isolates FX extraction logic from the rest of the export logic. + That allows a single ONNX exporter that can leverage different FX graphs.""" + + def __init__(self) -> None: + super().__init__() + self.input_adapter: io_adapter.InputAdapter = io_adapter.InputAdapter() + self.output_adapter: io_adapter.OutputAdapter = io_adapter.OutputAdapter() + + @abc.abstractmethod + def generate_fx( + self, + options: ResolvedExportOptions, + model: Union[torch.nn.Module, Callable], + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + ) -> torch.fx.GraphModule: + """Analyzes user ``model`` and generates a FX graph. + Args: + options: The export options. + model: The user model. + model_args: The model's positional input arguments. + model_kwargs: The model's keyword input arguments. + Returns: + The generated FX Graph. + """ + ... + + # TODO: Design the passes API + @abc.abstractmethod + def pre_export_passes( + self, + options: ResolvedExportOptions, + original_model: Union[torch.nn.Module, Callable], + fx_module: torch.fx.GraphModule, + fx_module_args: Sequence[Any], + ): + """Applies pre-export passes to the FX graph. 
+ + Pre-export passes are FX-to-FX graph transformations that make the graph + more palatable for the FX-to-ONNX conversion. + For example, it can be used to flatten model input/output, add explicit + casts to the graph, replace/decompose operators, functionalize the graph, etc. + """ + ... + + +class Exporter: + @_beartype.beartype + def __init__( + self, + options: ResolvedExportOptions, + model: Union[torch.nn.Module, Callable, torch_export.ExportedProgram], + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + ): + self.options = options + assert self.options is not None + + self.model = model + self.model_args = model_args + self.model_kwargs = model_kwargs + + # TODO: Retire FXSymbolicTracer + # NOTE: FXSymbolicTracer would fail in this assert, as it does not use `enable_fake_mode` + from torch.onnx._internal.fx import fx_symbolic_graph_extractor + + if not isinstance( + self.options.fx_tracer, fx_symbolic_graph_extractor.FXSymbolicTracer + ): + self._assert_fake_tensor_mode() + + def export(self) -> ONNXProgram: + with self.options.diagnostic_context: + graph_module = self.options.fx_tracer.generate_fx( + self.options, self.model, self.model_args, self.model_kwargs + ) + + # TODO: Defer `import onnxscript` out of `import torch` path + # https://github.com/pytorch/pytorch/issues/103764 + from torch.onnx._internal.fx import fx_onnx_interpreter + + fx_interpreter = fx_onnx_interpreter.FxOnnxInterpreter( + diagnostic_context=self.options.diagnostic_context + ) + onnxscript_graph = fx_interpreter.run( + fx_graph_module=graph_module, + onnxfunction_dispatcher=self.options.onnxfunction_dispatcher, + op_level_debug=self.options.op_level_debug, + ) + + # NOTE: Filter out the initializers with fake tensors when it's fake_mode exporting. + # Otherwise, the ONNX exporter will fail: RuntimeError: basic_string::_M_construct null + # not valid. + # Concrete data is expected to be filled for those initializers later during `ONNXProgram.save`. + if self.options.fake_context is not None: + initializers_with_real_tensors: Dict[str, torch.Tensor] = {} + for ( + initializer_name, + initializer, + ) in onnxscript_graph.initializers.items(): + if not isinstance(initializer, torch._subclasses.FakeTensor): + initializers_with_real_tensors[initializer_name] = initializer + onnxscript_graph.initializers = initializers_with_real_tensors + + # Export TorchScript graph to ONNX ModelProto. 
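+            # The target opset version is taken from the resolved ONNX registry
+            # (torchlib currently targets opset 18; see _DEFAULT_OPSET_VERSION above).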
+ onnx_model = onnxscript_graph.to_model_proto( + self.options.onnx_registry.opset_version, + ) + + return torch.onnx.ONNXProgram( + onnx_model, + self.options.fx_tracer.input_adapter, + self.options.fx_tracer.output_adapter, + self.options.diagnostic_context, + fake_context=self.options.fake_context, + model_signature=getattr( + self.model, "graph_signature", None + ), # Available for isinstance(self.model, ExportedProgram) only + model_torch=self.model, + ) + + def _assert_fake_tensor_mode(self): + """Asserts that the model and its input do not contain fake tensors.""" + + # Case 1: Model with fake inputs/weights and without enabling fake mode + has_any_fake_tensor = pytree.tree_any( + lambda x: isinstance(x, torch._subclasses.FakeTensor), + (self.model_args, self.model_kwargs), + ) + has_any_fake_param_or_buffer = False + if isinstance(self.model, torch.nn.Module): + has_any_fake_param_or_buffer = pytree.tree_any( + lambda x: isinstance(x, torch._subclasses.FakeTensor), + (self.model.parameters(), self.model.buffers()), + ) + if ( + has_any_fake_tensor or has_any_fake_param_or_buffer + ) and not self.options.fake_context: + raise RuntimeError( + "Cannot export a model with fake inputs/weights without enabling fake mode.", + ) + # Case 2: Model with non fake inputs/weights and enabled fake mode + has_any_non_fake_tensors = pytree.tree_any( + lambda x: isinstance(x, torch.Tensor) + and not isinstance(x, torch._subclasses.FakeTensor), + (self.model_args, self.model_kwargs), + ) + has_any_non_fake_param_or_buffer = False + if isinstance(self.model, torch.nn.Module): + has_any_non_fake_param_or_buffer = pytree.tree_any( + lambda x: isinstance(x, torch.Tensor) + and not isinstance(x, torch._subclasses.FakeTensor), + (self.model.parameters(), self.model.buffers()), + ) + if ( + has_any_non_fake_tensors or has_any_non_fake_param_or_buffer + ) and self.options.fake_context: + raise RuntimeError( + "Cannot export a model with non fake inputs/weights and enabled fake mode.", + ) + + +class UnsatisfiedDependencyError(RuntimeError): + """Raised when an ONNX exporter dependency cannot be satisfied.""" + + def __init__(self, package_name: str, message: str): + super().__init__(message) + self.package_name = package_name + + +class OnnxExporterError(RuntimeError): + """Raised when an ONNX exporter error occurs. + + This exception is thrown when there's an error during the ONNX export process. + It encapsulates the :class:`ONNXProgram` object generated until the failure, allowing + access to the partial export results and associated metadata. + """ + + onnx_program: Final[ONNXProgram] + + def __init__(self, onnx_program: ONNXProgram, message: str): + """ + Initializes the OnnxExporterError with the given ONNX program and message. + + Args: + onnx_program (ONNXProgram): The partial results of the ONNX export. + message (str): The error message to be displayed. + """ + super().__init__(message) + self.onnx_program = onnx_program + + +class InvalidExportOptionsError(RuntimeError): + """Raised when user specified an invalid value for the :class:`ExportOptions`.""" + + pass + + +@_beartype.beartype +def _assert_dependencies(export_options: ResolvedExportOptions): + opset_version = export_options.onnx_registry.opset_version + + def missing_package(package_name: str, exc_info: logging._ExcInfoType): + message = ( + f"Please install the `{package_name}` package " + f"(e.g. `python -m pip install {package_name}`)." 
+ ) + log.fatal(message, exc_info=exc_info) + return UnsatisfiedDependencyError(package_name, message) + + def missing_opset(package_name: str): + message = ( + f"The installed `{package_name}` does not support the specified ONNX opset " + f"version {opset_version}. Install a newer `{package_name}` package or " + f"specify an older opset version." + ) + log.fatal(message) + return UnsatisfiedDependencyError(package_name, message) + + try: + import onnx + except ImportError as e: + raise missing_package("onnx", e) from e + + if onnx.defs.onnx_opset_version() < opset_version: + raise missing_opset("onnx") + + try: + # PyTorch runs lintrunner in CI without onnxscript installed + import onnxscript # type: ignore[import] + except ImportError as e: + raise missing_package("onnxscript", e) from e + + if not isinstance( + onnxscript.onnx_opset.all_opsets[("", opset_version)], + onnxscript.values.Opset, + ): + raise missing_opset("onnxscript") + + +@_beartype.beartype +def dynamo_export( + model: Union[torch.nn.Module, Callable, torch_export.ExportedProgram], # type: ignore[name-defined] + /, + *model_args, + export_options: Optional[ExportOptions] = None, + **model_kwargs, +) -> ONNXProgram: + """Export a torch.nn.Module to an ONNX graph. + + Args: + model: The PyTorch model to be exported to ONNX. + model_args: Positional inputs to ``model``. + model_kwargs: Keyword inputs to ``model``. + export_options: Options to influence the export to ONNX. + + Returns: + An in-memory representation of the exported ONNX model. + + **Example 1 - Simplest export** + :: + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.linear = torch.nn.Linear(2, 2) + def forward(self, x, bias=None): + out = self.linear(x) + out = out + bias + return out + model = MyModel() + kwargs = {"bias": 3.} + args = (torch.randn(2, 2, 2),) + onnx_program = torch.onnx.dynamo_export( + model, + *args, + **kwargs).save("my_simple_model.onnx") + + **Example 2 - Exporting with dynamic shapes** + :: + + # The previous model can be exported with dynamic shapes + export_options = torch.onnx.ExportOptions(dynamic_shapes=True) + onnx_program = torch.onnx.dynamo_export( + model, + *args, + **kwargs, + export_options=export_options) + onnx_program.save("my_dynamic_model.onnx") + + + By printing input dynamic dimensions we can see the input shape is no longer (2,2,2) + :: + + >>> print(onnx_program.model_proto.graph.input[0]) + name: "arg0" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_param: "arg0_dim_0" + } + dim { + dim_param: "arg0_dim_1" + } + dim { + dim_param: "arg0_dim_2" + } + } + } + } + """ + + if export_options is not None: + resolved_export_options = ( + export_options + if isinstance(export_options, ResolvedExportOptions) + else ResolvedExportOptions(export_options, model=model) + ) + else: + resolved_export_options = ResolvedExportOptions(ExportOptions(), model=model) + + _assert_dependencies(resolved_export_options) + + try: + return Exporter( + options=resolved_export_options, + model=model, + model_args=model_args, + model_kwargs=model_kwargs, + ).export() + except Exception as e: + sarif_report_path = _DEFAULT_FAILED_EXPORT_SARIF_LOG_PATH + resolved_export_options.diagnostic_context.dump(sarif_report_path) + message = ( + f"Failed to export the model to ONNX. Generating SARIF report at '{sarif_report_path}'. " + "SARIF is a standard format for the output of static analysis tools. 
" + "SARIF logs can be loaded in VS Code SARIF viewer extension, " + "or SARIF web viewer (https://microsoft.github.io/sarif-web-component/). " + f"Please report a bug on PyTorch Github: {_PYTORCH_GITHUB_ISSUES_URL}" + ) + raise OnnxExporterError( + ONNXProgram._from_failure(e, resolved_export_options.diagnostic_context), + message, + ) from e + + +def common_pre_export_passes( + options: ResolvedExportOptions, + original_model: Union[torch.nn.Module, Callable], + fx_module: torch.fx.GraphModule, + fx_module_args: Sequence[Any], +): + # TODO: Import here to prevent circular dependency + from torch.onnx._internal.fx import analysis, passes + + diagnostic_context = options.diagnostic_context + + # Apply decomposition table to the input graph. + module = passes.Decompose( + diagnostic_context, + fx_module, + options.decomposition_table, + enable_dynamic_axes=options.dynamic_shapes, + allow_fake_constant=options.fake_context is not None, + ).run(*fx_module_args) + + # ONNX does not support views and mutations. + # Functionalize to get a semantically equivalent graph without mutations. + module = passes.Functionalize( + diagnostic_context, + module, + enable_dynamic_axes=options.dynamic_shapes, + allow_fake_constant=options.fake_context is not None, + ).run(*fx_module_args) + + # Input mutations are detected and distilled after `Functionalize` pass. + # Remove them since ONNX inference does not need them. + module = passes.RemoveInputMutation(diagnostic_context, module).run(*fx_module_args) + + # ONNX does not support concept of (implicit) type promotion. + # Insert type casts explicitly where needed. + module = passes.InsertTypePromotion(diagnostic_context, module).run() + + analysis.UnsupportedFxNodesAnalysis( + diagnostic_context, module, options.onnxfunction_dispatcher + ).analyze(infra.levels.ERROR) + + if isinstance(original_model, torch.nn.Module): + module = passes.RestoreParameterAndBufferNames( + diagnostic_context, module, original_model + ).run() + + # This operation should be invoked as the last pre export pass. + # See [NOTE: Modularize pass ordering] + module = passes.Modularize(diagnostic_context, module).run() + + # ONNX does not support None inputs. During graph building, all None inputs + # are removed. Here we register this step to input adapter. + options.fx_tracer.input_adapter.append_step(io_adapter.RemoveNoneInputStep()) + + # NOTE: temp workaround for https://github.com/pytorch/pytorch/issues/99534 + # Dynamo doesn't support non-tensor inputs. + options.fx_tracer.input_adapter.append_step(io_adapter.RemoveNonTensorInputStep()) + + # ONNX does not support complex inputs. During graph building, all complex inputs + # are converted to real representation inputs. Here we register this step to + # input/output adapter. + options.fx_tracer.input_adapter.append_step( + io_adapter.ConvertComplexToRealRepresentationInputStep() + ) + + # ONNX can't represent collection types (e.g., dictionary, tuple of tuple of + # tensor, etc), we flatten the collection and register each element as output. + options.fx_tracer.output_adapter.append_step(io_adapter.FlattenOutputStep()) + + # Output post-processing steps should happen after `FlattenOutputStep`. 
+ options.fx_tracer.output_adapter.append_step( + io_adapter.ConvertComplexToRealRepresentationOutputStep() + ) + + return module + + +__all__ = [ + "DiagnosticOptions", + "ExportOptions", + "ONNXProgram", + "ONNXProgramSerializer", + "ONNXRuntimeOptions", + "InvalidExportOptionsError", + "OnnxExporterError", + "OnnxRegistry", + "UnsatisfiedDependencyError", + "dynamo_export", + "enable_fake_mode", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/io_adapter.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/io_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..de440e5a573fef7fadae8d333f8b183e01158444 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/io_adapter.py @@ -0,0 +1,672 @@ +from __future__ import annotations + +import inspect + +from typing import ( + Any, + Callable, + List, + Mapping, + Optional, + Protocol, + runtime_checkable, + Sequence, + Tuple, + Union, +) + +import torch +import torch.export as torch_export + +from torch.onnx._internal import _beartype +from torch.utils import _pytree as pytree + +# TODO(bowbao): Add diagnostics for IO adapters. + + +@runtime_checkable +class InputAdaptStep(Protocol): + """A protocol that defines a step in the input adapting process. + + The input adapting process is a sequence of steps that are applied to the + PyTorch model inputs to transform them into the inputs format expected by the + exported ONNX model. Each step takes the PyTorch model inputs as arguments and + returns the transformed inputs. + + This serves as a base formalized construct for the transformation done to model + input signature by any individual component in the exporter. + """ + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + ... + + +class InputAdapter: + """A class that adapts the PyTorch model inputs to exported ONNX model inputs format.""" + + def __init__(self, steps: Optional[List[InputAdaptStep]] = None): + self._steps = steps or [] + + @_beartype.beartype + def append_step(self, step: InputAdaptStep) -> None: + """Appends a step to the input adapt steps. + + Args: + step: The step to append. + """ + self._steps.append(step) + + @_beartype.beartype + def apply( + self, + *model_args, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + **model_kwargs, + ) -> Sequence[Union[int, float, bool, str, "torch.Tensor", None]]: + """Converts the PyTorch model inputs to exported ONNX model inputs format. + + Args: + model_args: The PyTorch model inputs. + model: The PyTorch model. + model_kwargs: The PyTorch model keyword inputs. + Returns: + A sequence of tensors converted from PyTorch model inputs. + """ + args: Sequence[Any] = model_args + kwargs: Mapping[str, Any] = model_kwargs + for step in self._steps: + args, kwargs = step.apply(args, kwargs, model=model) + assert not kwargs + return args + + +@runtime_checkable +class OutputAdaptStep(Protocol): + """A protocol that defines a step in the output adapting process. + + The output adapting process is a sequence of steps that are applied to the + PyTorch model outputs to transform them into the outputs format produced by the + exported ONNX model. Each step takes the PyTorch model outputs as arguments and + returns the transformed outputs. 
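+    A step might, for example, flatten a nested output structure or convert complex
+    tensors to their real-valued representation.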
+ + This serves as a base formalized construct for the transformation done to model + output signature by any individual component in the exporter. + """ + + def apply( + self, + model_outputs: Any, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Any: + ... + + +class OutputAdapter: + """A class that adapts the PyTorch model outputs to exported ONNX model outputs format.""" + + def __init__(self, steps: Optional[List[OutputAdaptStep]] = None): + self._steps = steps or [] + + @_beartype.beartype + def append_step(self, step: OutputAdaptStep) -> None: + """Appends a step to the output format steps. + + Args: + step: The step to append. + """ + self._steps.append(step) + + @_beartype.beartype + def apply( + self, + model_outputs: Any, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Sequence[Union["torch.Tensor", int, float, bool, str]]: + """Converts the PyTorch model outputs to exported ONNX model outputs format. + + Args: + model_outputs: The PyTorch model outputs. + model: The PyTorch model. + + Returns: + PyTorch model outputs in exported ONNX model outputs format. + """ + for step in self._steps: + model_outputs = step.apply(model_outputs, model=model) + return model_outputs + + +# TODO: make_fx lose stack info https://github.com/pytorch/pytorch/issues/90276 + + +def _replace_tuple_with_list(spec: pytree.TreeSpec) -> pytree.TreeSpec: + _type = list if spec.type == tuple else spec.type + return pytree.TreeSpec( + _type, spec.context, list(map(_replace_tuple_with_list, spec.children_specs)) + ) + + +def _open_top_level_list_if_single_element(spec: pytree.TreeSpec) -> pytree.TreeSpec: + if spec.type == list and len(spec.children_specs) == 1: + return spec.children_specs[0] + return spec + + +def _assert_identical_pytree_spec( + spec1: pytree.TreeSpec, spec2: pytree.TreeSpec, error_message: str +) -> None: + """Assert the two `TreeSpec` objects are identical. + + Args: + spec1: The first `TreeSpec` object. + spec2: The second `TreeSpec` object. + error_message: The error message to raise if the two `TreeSpec` objects are not + identical. + + Raises: + ValueError: If the two `TreeSpec` objects are not identical. + """ + # TODO(bowbao): Turn this check into diagnostic. Consider warning instead of error. + pass_if_any_checks: Sequence[Callable[[], bool]] = [ + lambda: spec1 == spec2, + # FIXME: Bug in `dynamo.export`. Sometimes outputs returned in 'list' instead of 'tuple'. + lambda: _replace_tuple_with_list(spec1) == _replace_tuple_with_list(spec2), + # FIXME: Bug in `dynamo.export`. Sometimes single function return is wrapped in list. + lambda: _open_top_level_list_if_single_element(spec1) == spec2, + lambda: spec1 == _open_top_level_list_if_single_element(spec2), + ] + + if not any(check() for check in pass_if_any_checks): + raise ValueError(f"{error_message}\nExpect {spec1}.\nActual {spec2}.") + + +class BindInputStep(InputAdaptStep): + """Bind the input arguments to the model signature.""" + + def __init__(self, model_signature: inspect.Signature): + self._model_signature = model_signature + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Bind the input arguments to the model signature. + + We hope the input kwargs will be mapped to bound.args after binding. + If not, we will raise an error. 
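+        For example (illustrative), for a signature ``(x, y=None)`` applied with
+        ``model_args=(t,)`` and ``model_kwargs={"y": u}``, the returned mapping is
+        ``{"x": t, "y": u}`` and the returned positional args tuple is empty.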
+ + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. args is always empty. + + Raises: + ValueError: If there are keyword-only arguments left after binding args and + kwargs to model signature. + """ + bound = self._model_signature.bind(*model_args, **model_kwargs) + bound.apply_defaults() + + # keyword-only arguments are not handled. + # bound.kwargs only contains keyword-only arguments after calling + # bind & apply_defaults, so we raise if it's not empty. + if bound.kwargs: + raise ValueError("Keyword-only arguments are not supported.") + return (), bound.arguments + + +class MergeKwargsIntoArgsInputStep(InputAdaptStep): + """Merge the input kwargs into the input args.""" + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Merge the input kwargs into the input args. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. kwargs is always empty. + """ + return tuple(model_args) + tuple(model_kwargs.values()), {} + + +class LiftParametersAndBuffersIntoArgsInputStep(InputAdaptStep): + """Append parameters and buffers to model's positional argument list.""" + + def __init__(self, inputs: Tuple["torch.Tensor", ...]) -> None: + self.inputs = inputs + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Append model's parameters and buffers into its input. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args + appended inputs and kwargs. + """ + return (*model_args, *self.inputs), model_kwargs + + +class ConvertComplexToRealRepresentationInputStep(InputAdaptStep): + """Convert complex dtype tensors to real representation tensors. + + ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors + to real representation tensors (i.e., float dtype tensors with an extra dimension + representing the real and imaginary parts of the complex number). + + """ + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Convert complex tensors to float tensors. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. + """ + return ( + tuple( + torch.view_as_real(arg) + if isinstance(arg, torch.Tensor) and arg.is_complex() + else arg + for arg in model_args + ), + model_kwargs, + ) + + +class RemoveNoneInputStep(InputAdaptStep): + """Remove `None` from arguments. + + This adapt step assumes ``model_kwargs`` is empty. It also assumes ``model_args`` + is flattened, i.e. it does not check `None` inside nested collections. 
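+    For example, ``(t1, None, t2)`` becomes ``(t1, t2)``.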
+ """ + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Remove `None` from arguments. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. + + Raises: + ValueError: If `model_kwargs` is not empty. + """ + assert not model_kwargs + return tuple(arg for arg in model_args if arg is not None), {} + + +class RemoveNonTensorInputStep(InputAdaptStep): + """Remove the non-tensor input arguments. + + Dynamo does not support non-tensor input arguments (https://github.com/pytorch/pytorch/issues/99534). + + Specifically, it does put the input into graph with an empty node, but consumed by no ones. + The concrete value is embedded into the graph as a constant arg of a target node. Meta + suggests in this case that one should rewrite the model code to make it tensor if the + input value is supposed to change at runtime. We might need to further investigate + the feasibility of that suggestion. + + For example, + + def func(x, b=1.0): + y = x + b + z = y.relu() + return (y, z) + + x = torch.randn(1, 1, 2, dtype=torch.float32) + gm_fun, _ = dynamo.export(func, x, b=8.0, aten_graph=True, tracing_mode="real") + + # class GraphModule(torch.nn.Module): + # def forward(self, x, b): + # arg0: f32[1, 1, 2], arg1, = fx_pytree.tree_flatten_spec(([x, b], {}), self._in_spec) + # # File: path/to/pytorch/test_constant_input.py:5, code: y = x + b + # add_tensor: f32[1, 1, 2] = torch.ops.aten.add.Tensor(arg0, 8.0); arg0 = None + + # # File: path/to/pytorch/test_constant_input.py:6, code: z = y.relu() + # relu_default: f32[1, 1, 2] = torch.ops.aten.relu.default(add_tensor) + # return pytree.tree_unflatten([add_tensor, relu_default], self._out_spec) + + Empty torch.fx.Node input leading to a mismatched number of input with PyTorch, as + it's ignored in ONNX graph. Thus, we delete the useless input here. + + """ + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Remove Constant from arguments. + + Args: + model_args: The model args. + model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the model args and kwargs. + + Raises: + ValueError: If `model_kwargs` is not empty. + """ + assert not model_kwargs + return ( + tuple( + arg + for arg in model_args + if not isinstance(arg, (int, float, bool, str)) + ), + {}, + ) + + +class FlattenInputWithTreeSpecValidationInputStep(InputAdaptStep): + """Flatten nested collection types and return a flat list of elements. + + ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor, + etc). + + This class stores the `SpecTree` output produced when `adapt` was called the first + time. It then validates the `SpecTree` output produced from later `adapt` calls. + """ + + _spec: Optional[pytree.TreeSpec] = None + + def apply( + self, + model_args: Sequence[Any], + model_kwargs: Mapping[str, Any], + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Tuple[Sequence[Any], Mapping[str, Any]]: + """Flatten the model args and kwargs and validate the `SpecTree` output. + + Args: + model_args: The model args. 
+ model_kwargs: The model kwargs. + model: The PyTorch model. + + Returns: + A tuple of the flattened model args and kwargs. The kwargs is empty, because + they are flattened and merged into the args. + + Raises: + ValueError: If the `SpecTree` output produced from the current `model_outputs` + is not identical to the `SpecTree` output produced from the first + `model_outputs` that was passed to this method. + """ + flattened_args, spec = pytree.tree_flatten((model_args, model_kwargs)) + if self._spec is None: + self._spec = spec + else: + _assert_identical_pytree_spec( + self._spec, + spec, + error_message="Model inputs incompatible with the format that was exported. ", + ) + return flattened_args, {} + + +class FlattenOutputStep(OutputAdaptStep): + """Flatten nested collection types and return a flat list of elements. + + ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor, + etc). + + NOTE: Ideally we would want to use ``FlattenOutputWithTreeSpecValidationOutputStep``, such + that `SpecTree` can be validate for new model outputs. However, this is not possible + currently because we never have access to real PyTorch model outputs during export. + Only traced outputs may be available, but they are not an accurate reflection of the + original PyTorch model outputs format as they are typically in their own unique format, + depending on the tracing strategy. + """ + + def apply( + self, + model_outputs: Any, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Sequence[Any]: + """Flatten the model outputs. + + Args: + model_outputs: The model outputs to flatten. + model: The PyTorch model. + + Returns: + A tuple of the flattened model outputs. + """ + return pytree.tree_leaves(model_outputs) + + +class ConvertComplexToRealRepresentationOutputStep(OutputAdaptStep): + """Convert complex dtype tensors to real representation tensors. + + ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors + to real representation tensors (i.e., float dtype tensors with an extra dimension + representing the real and imaginary parts of the complex number). + + """ + + def apply( + self, + model_outputs: Any, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Any: + """Convert float tensors to complex tensors. + + Args: + model_outputs: The model output. + model: The PyTorch model. + + Returns: + A tuple of the model output. + """ + return [ + torch.view_as_real(output) + if isinstance(output, torch.Tensor) and torch.is_complex(output) + else output + for output in model_outputs + ] + + +class FlattenOutputWithTreeSpecValidationOutputStep(OutputAdaptStep): + """Same as ``FlattenOutputStep``, with additional `TreeSpec` validation. + + This class stores the `SpecTree` output produced when `adapt` was called the first + time. It then validates the `SpecTree` output produced from later `adapt` calls. + """ + + _spec: Optional[pytree.TreeSpec] = None + + def apply( + self, + model_outputs: Any, + model: Optional[ + Union[torch.nn.Module, Callable, torch_export.ExportedProgram] + ] = None, + ) -> Sequence[Any]: + """Flatten the model outputs and validate the `SpecTree` output. + + Args: + model_outputs: The model outputs to flatten. + model: The PyTorch model. + + Returns: + flattened_outputs: The flattened model outputs. 
+
+        Raises:
+            ValueError: If the `SpecTree` output produced from the current `model_outputs`
+                is not identical to the `SpecTree` output produced from the first
+                `model_outputs` that was passed to this method.
+        """
+        flattened_outputs, spec = pytree.tree_flatten(model_outputs)
+        if self._spec is None:
+            self._spec = spec
+        else:
+            _assert_identical_pytree_spec(
+                self._spec,
+                spec,
+                error_message="Model outputs incompatible with the format that was exported. ",
+            )
+        return flattened_outputs
+
+
+class PrependParamsBuffersConstantAotAutogradInputStep(InputAdaptStep):
+    """Prepend model parameters, buffers and constants to the user input.
+
+    :func:`torch.export.export` lifts model parameters, buffers and constants as model input, thus, they
+    must be added to the user input before the model is executed.
+
+    Args:
+        model: The PyTorch model with embedded parameters and buffers.
+    """
+
+    def apply(
+        self,
+        model_args: Sequence[Any],
+        model_kwargs: Mapping[str, Any],
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Tuple[Sequence[Any], Mapping[str, Any]]:
+        """Prepend the model's parameters, buffers and constants to the model args.
+
+        Args:
+            model_args: The model args.
+            model_kwargs: The model kwargs.
+            model: The PyTorch model.
+
+        Returns:
+            A tuple of the updated model args and kwargs.
+        """
+        ordered_params = tuple(
+            model.state_dict[name] for name in model.graph_signature.parameters  # type: ignore[union-attr,index]
+        )
+        ordered_buffers = tuple(
+            model.state_dict[name] for name in model.graph_signature.buffers  # type: ignore[union-attr,index]
+        )
+        ordered_constant_tensors = tuple(
+            getattr(model.module(), name) for name in model.graph_signature.lifted_tensor_constants  # type: ignore[union-attr,index]
+        )
+
+        # NOTE: calling convention is first params, then buffers, then args as user supplied them.
+        # See: torch/_functorch/aot_autograd.py#L1034
+        updated_args = (
+            *ordered_params,
+            *ordered_buffers,
+            *ordered_constant_tensors,
+            *model_args,
+        )
+        if model_kwargs:
+            return MergeKwargsIntoArgsInputStep().apply(
+                updated_args, model_kwargs, model=model
+            )
+        return updated_args, {}
+
+
+class PrependParamsAndBuffersAotAutogradOutputStep(OutputAdaptStep):
+    """Prepend model's mutated buffers to the user output.
+
+    :func:`torch.export.export` lifts model's mutated buffers as outputs, thus, they
+    must be added to the user output after the model is executed.
+
+    Args:
+        model: The PyTorch model with mutated buffers.
+    """
+
+    def apply(
+        self,
+        model_outputs: Any,
+        model: Optional[
+            Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
+        ] = None,
+    ) -> Sequence[Any]:
+        """Prepend the model's mutated buffers to the model outputs.
+
+        Args:
+            model_outputs: The model outputs.
+            model: The PyTorch model.
+
+        Returns:
+            The model outputs prepended with the mutated buffers.
+        """
+
+        assert isinstance(
+            model, torch_export.ExportedProgram
+        ), "'model' must be torch_export.ExportedProgram"
+        ordered_buffers = tuple(
+            model.state_dict[name]
+            for name in model.graph_signature.buffers_to_mutate.values()
+        )
+        # NOTE: calling convention is first mutated buffers, then the outputs as the model returned them.
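+        # For example, if buffers `b1` and `b2` are mutated and the model returns
+        # `(y,)`, the adapted outputs are `(b1, b2, y)`.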
+ updated_outputs = (*ordered_buffers, *model_outputs) + return updated_outputs diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/jit_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/jit_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9052961fc7a646a3b1c153cc1ee2a1fbd06ba697 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/jit_utils.py @@ -0,0 +1,399 @@ +"""Utilities for manipulating the torch.Graph object and the torchscript.""" +from __future__ import annotations + +# TODO(justinchuby): Move more of the symbolic helper functions here and expose +# them to the user. + +import dataclasses +import re +import typing +from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union + +import torch +from torch import _C +from torch._C import _onnx as _C_onnx +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, registration + + +_ATTR_PATTERN = re.compile("^(.+)_(([ifstgz])|(ty))$") +_SKIP_NODE_ATTRIBUTES = {"inplace", "aten"} + + +@dataclasses.dataclass +class GraphContext: + """Extra context for symbolic functions with all methods from torch.Graph. + + NOTE: This class is not meant for external consumption. Please do not depend on + it outside of torch.onnx as the interface may evolve. + + Attributes: + graph: The _C.Graph being constructed. + block: The current _C.Block being constructed. + opset: The opset version. + original_node: Current node that is being converted from. + params_dict: Mapping from graph initializer name to IValue. + env: Mapping from Torch domain graph Value to ONNX domain graph Value. + """ + + graph: _C.Graph + block: _C.Block + opset: int + original_node: _C.Node + params_dict: Dict[str, "_C.IValue"] + env: Dict[_C.Value, _C.Value] + + # Relay methods from _C.Graph for compatibility with symbolic functions that expect + # a _C.Graph + def __getattr__(self, name: str) -> Any: + return getattr(self.graph, name) + + @_beartype.beartype + def op( + self, + opname: str, + *raw_args: Union[torch.Tensor, _C.Value], + outputs: int = 1, + **kwargs, + ): + """Creates an ONNX operator "opname", taking "raw_args" as inputs and "kwargs" as attributes. + + The set of operators and the inputs/attributes they take + is documented at https://github.com/onnx/onnx/blob/master/docs/Operators.md + + Args: + opname: The ONNX operator name, e.g., `Abs` or `Add`, or an operator qualified + with a namespace, e.g., `aten::add`. + raw_args: The inputs to the operator; usually provided + as arguments to the `symbolic` definition. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + kwargs: The attributes of the ONNX operator, whose keys are named + according to the following convention: `alpha_f` indicates + the `alpha` attribute with type `f`. The valid type specifiers are + `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute + specified with type float accepts either a single float, or a + list of floats (e.g., you would say `dims_i` for a `dims` attribute + that takes a list of integers). + + Returns: + The value representing the single output of this operator (see the `outputs` + keyword argument for multi-return nodes). 
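+
+        Example (illustrative; assumes ``g`` is a ``GraphContext`` and ``x``, ``y``
+        are ``_C.Value`` inputs)::
+
+            # Add two values, then take the absolute value of the result.
+            out = g.op("Abs", g.op("Add", x, y))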
+ """ + # FIXME(justinchuby): Add the return type back once we know how to handle mypy + return _add_op(self, opname, *raw_args, outputs=outputs, **kwargs) + + @_beartype.beartype + def aten_op(self, operator: str, *args, overload_name: str = "", **kwargs): + """Generates an ONNX ATen op node. + + This function is for backward compatibility with the old symbolic functions. + """ + return self.op( + "aten::ATen", + *args, + operator_s=operator, + overload_name_s=overload_name, + **kwargs, + ) + + # NOTE: For backward compatibility with the old symbolic functions. + # We are probably going to remove this only after the fx exporter is established. + at = aten_op + + @_beartype.beartype + def onnxscript_op( + self, + onnx_fn, # TODO(titaiwang): annotate this when onnx-script becomes dependency + *raw_args: Union[torch.Tensor, _C.Value], + outputs: int = 1, + **kwargs, + ): + """Creates an ONNX operator from onnx-script function, taking "raw_args" as inputs and "kwargs" as attributes. + + onnx-script repository: https://github.com/microsoft/onnx-script + + Args: + onnx_fn: ONNXFunction from onnx-script; An example can be found at + https://github.com/microsoft/onnx-script#example + raw_args: The inputs to the operator; usually provided + as arguments to the `symbolic` definition. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + kwargs: The attributes of the ONNX operator, whose keys are named + according to the following convention: `alpha_f` indicates + the `alpha` attribute with type `f`. The valid type specifiers are + `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute + specified with type float accepts either a single float, or a + list of floats (e.g., you would say `dims_i` for a `dims` attribute + that takes a list of integers). + + Returns: + The value representing the single output of this operator (see the `outputs` + keyword argument for multi-return nodes). + """ + # NOTE(titaiwang): This is using class attributes, and it needs to be updated + # if onnx-script makes any change on these. + symbolic_name = f"{onnx_fn.opset.domain}::{onnx_fn.name}" + opset_version = onnx_fn.opset.version + + registration.custom_onnx_symbolic(symbolic_name, opset_version)(onnx_fn) + + return _add_op(self, symbolic_name, *raw_args, outputs=outputs, **kwargs) + + +@_beartype.beartype +def add_op_with_blocks( + graph_context: GraphContext, + opname: str, + *inputs: _C.Value, + outputs: int = 1, + n_blocks: int = 1, + **attributes, +) -> Tuple[Any, Tuple[GraphContext, ...], _C.Node]: + """Creates an ONNX operator "opname", taking inputs and attributes. + + Args: + graph_context: The context for the current graph. + opname: The ONNX operator name, e.g., `Abs` or `Add`, or an operator qualified + with a namespace, e.g., `aten::add`. + inputs: The inputs to the operator. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + n_blocks: The number of sub-blocks to create in the node. + attributes: The attributes of the ONNX operator. 
+ + Returns: + A tuple of (output_values, new_contexts, node) where: + output_values: One or more output value of this operator + (see the `outputs` keyword argument for multi-return nodes). + new_contexts: A tuple of new graph contexts for each sub-block. + node: The node representing the operator. + """ + + output_values = graph_context.op(opname, *inputs, outputs=outputs, **attributes) + if isinstance(output_values, Sequence): + node = output_values[0].node() + else: + node = output_values.node() + + new_contexts = [] + for _ in range(n_blocks): + new_block = node.addBlock() + # Create shallow copy of the graph context and update the block + new_context = dataclasses.replace(graph_context, block=new_block) + new_contexts.append(new_context) + + return output_values, tuple(new_contexts), node + + +@_beartype.beartype +def _add_op( + graph_context: GraphContext, + opname: str, + *args: Union[torch.Tensor, _C.Value], + outputs: int = 1, + **kwargs, +): + """Creates an ONNX operator "opname", taking "args" as inputs and attributes "kwargs". + + The set of operators and the inputs/attributes they take + is documented at https://github.com/onnx/onnx/blob/master/docs/Operators.md + + This function is monkey-patched onto Graph. + + Args: + graph_context: The Torch Graph or Block. + opname: The ONNX operator name, e.g., `Abs` or `Add`, or an operator qualified + with a namespace, e.g., `aten::add`. + args: The inputs to the operator; usually provided + as arguments to the `symbolic` definition. + outputs: The number of outputs this operator returns. + By default an operator is assumed to return a single output. + If `outputs` is greater than one, this functions returns a tuple + of output `Value`, representing each output of the ONNX operator + in order. + kwargs: The attributes of the ONNX operator, whose keys are named + according to the following convention: `alpha_f` indicates + the `alpha` attribute with type `f`. The valid type specifiers are + `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute + specified with type float accepts either a single float, or a + list of floats (e.g., you would say `dims_i` for a `dims` attribute + that takes a list of integers). + + Returns: + (Union[_C.Value, Tuple[_C.Value, ...]]) + The value representing the single output of this operator (see the `outputs` + keyword argument for multi-return nodes). 
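+
+    For example (illustrative), ``_add_op(ctx, "Constant", value_t=torch.tensor(1.0))``
+    creates an ``onnx::Constant`` node whose ``value`` attribute holds that tensor and
+    returns the node's single output value.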
+ """ + inputs = [_const_if_tensor(graph_context, arg) for arg in args] + # Filter out None attributes, this can be convenient client side because + # now they can pass through None attributes, and have them not show up + attributes = {k: v for k, v in kwargs.items() if v is not None} + + if "::" not in opname: + opname = "onnx::" + opname + + node = _create_node( + graph_context.block, + opname, + inputs, + attributes, + params_dict=graph_context.params_dict, + opset_version=graph_context.opset, + n_outputs=outputs, + shape_inference=GLOBALS.onnx_shape_inference, + ) + + if outputs == 1: + return node.output() + return tuple(node.outputs()) + + +@_beartype.beartype +def _const_if_tensor(graph_context: GraphContext, arg): + if arg is None: + return arg + if isinstance(arg, _C.Value): + return arg + + return _add_op(graph_context, "onnx::Constant", value_z=arg) + + +def _create_node( + graph_or_block: Union[_C.Graph, _C.Block], + domain_op: str, + inputs: Sequence, + attributes: dict, + params_dict: dict, + opset_version: int, + n_outputs: int, + shape_inference: bool = True, +) -> _C.Node: + """Creates an node 'domain_op', taking inputs and attributes.""" + if isinstance(graph_or_block, _C.Graph): + graph = graph_or_block + node = graph.create(domain_op, inputs, n_outputs) + node = graph.insertNode(node) + elif isinstance(graph_or_block, _C.Block): + block = graph_or_block + node = block.addNode(domain_op, inputs) + + # Block does not have create defined, so we need to add outputs manually + if n_outputs > 1: + for _ in range(1, n_outputs): + node.addOutput() + + node_ouputs = tuple(node.outputs()) + assert len(node_ouputs) == n_outputs + + aten = domain_op.startswith("aten::") + + # Add all attributes + for key, value in sorted(attributes.items()): + if key in _SKIP_NODE_ATTRIBUTES: + continue + _add_attribute(node, key, value, aten=aten) + if shape_inference: + _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version) + return node + + +@_beartype.beartype +def _is_onnx_list(value): + return isinstance(value, Iterable) and not isinstance( + value, (str, bytes, torch.Tensor) + ) + + +@_beartype.beartype +def _scalar(x: torch.Tensor): + """Convert a scalar tensor into a Python value.""" + assert x.numel() == 1 + return x[0] + + +@_beartype.beartype +def _is_caffe2_aten_fallback() -> bool: + return ( + GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + and _C_onnx._CAFFE2_ATEN_FALLBACK + ) + + +@_beartype.beartype +def _add_attribute(node: _C.Node, key: str, value: Any, aten: bool): + r"""Initializes the right attribute based on type of value.""" + m = _ATTR_PATTERN.match(key) + if m is None: + raise ValueError( + f"Invalid attribute specifier '{key}' names " + "must be suffixed with type, e.g. 'dim_i' or 'dims_i'" + ) + name, kind = m.group(1), m.group(2) + if _is_onnx_list(value): + kind += "s" + + if aten and _is_caffe2_aten_fallback(): + if isinstance(value, torch.Tensor): + # Caffe2 proto does not support tensor attribute. + if value.numel() > 1: + raise ValueError("Should not pass tensor attribute") + value = _scalar(value) + if isinstance(value, float): + kind = "f" + else: + kind = "i" + return getattr(node, f"{kind}_")(name, value) + + +# TODO: Expose this to user when migrating symbolic helper functions to here. 
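+# Example of the attribute-suffix convention handled by `_add_attribute` above:
+# a kwarg like `dims_i=[1, 2]` parses into name "dims" and kind "i"; because the value
+# is a list, the kind becomes "is" and the attribute is set via `node.is_("dims", [1, 2])`.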
+@_beartype.beartype +def _is_tensor(x: _C.Value) -> bool: + return x.type().isSubtypeOf(_C.TensorType.get()) + + +@_beartype.beartype +def get_device_from_value(value: _C.Value) -> Optional[torch.device]: + if not _is_tensor(value): + return None + tensor_type = typing.cast(_C.TensorType, value.type()) + return tensor_type.device() + + +@_beartype.beartype +def parse_node_kind(kind: str) -> Tuple[str, str]: + """Parse node kind into domain and Op name.""" + if "::" not in kind: + raise ValueError(f"Node kind: {kind} is invalid. '::' is not in node kind.") + domain, opname = kind.split("::", 1) + if "::" in opname: + raise ValueError(f"Node kind: {kind} is invalid. '::' should only apear once.") + return domain, opname + + +@_beartype.beartype +def is_aten(domain: str) -> bool: + """Check if the domain is official.""" + return domain == "aten" + + +@_beartype.beartype +def is_prim(domain: str) -> bool: + """Check if the domain is official.""" + return domain == "prim" + + +@_beartype.beartype +def is_onnx(domain: str) -> bool: + """Check if the domain is official.""" + return domain == "onnx" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/onnx_proto_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/onnx_proto_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..52c37ddc8d0102f6c2f7fe891c236b33e7aa0747 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/onnx_proto_utils.py @@ -0,0 +1,296 @@ +"""Utilities for manipulating the onnx and onnx-script dependencies and ONNX proto.""" + +from __future__ import annotations + +import glob +import io +import os +import shutil +import zipfile +from typing import Any, List, Mapping, Set, Tuple, Union + +import torch +import torch.jit._trace +import torch.serialization +from torch.onnx import _constants, _exporter_states, errors +from torch.onnx._internal import _beartype, jit_utils, registration + + +@_beartype.beartype +def export_as_test_case( + model_bytes: bytes, inputs_data, outputs_data, name: str, dir: str +) -> str: + """Export an ONNX model as a self contained ONNX test case. + + The test case contains the model and the inputs/outputs data. The directory structure + is as follows: + + dir + ├── test_ + │ ├── model.onnx + │ └── test_data_set_0 + │ ├── input_0.pb + │ ├── input_1.pb + │ ├── output_0.pb + │ └── output_1.pb + + Args: + model_bytes: The ONNX model in bytes. + inputs_data: The inputs data, nested data structure of numpy.ndarray. + outputs_data: The outputs data, nested data structure of numpy.ndarray. + + Returns: + The path to the test case directory. + """ + try: + import onnx + except ImportError as exc: + raise ImportError( + "Export test case to ONNX format failed: Please install ONNX." 
+ ) from exc + + test_case_dir = os.path.join(dir, "test_" + name) + os.makedirs(test_case_dir, exist_ok=True) + _export_file( + model_bytes, + os.path.join(test_case_dir, "model.onnx"), + _exporter_states.ExportTypes.PROTOBUF_FILE, + {}, + ) + data_set_dir = os.path.join(test_case_dir, "test_data_set_0") + if os.path.exists(data_set_dir): + shutil.rmtree(data_set_dir) + os.makedirs(data_set_dir) + + proto = onnx.load_model_from_string(model_bytes) # type: ignore[attr-defined] + + for i, (input_proto, input) in enumerate(zip(proto.graph.input, inputs_data)): + export_data(input, input_proto, os.path.join(data_set_dir, f"input_{i}.pb")) + for i, (output_proto, output) in enumerate(zip(proto.graph.output, outputs_data)): + export_data(output, output_proto, os.path.join(data_set_dir, f"output_{i}.pb")) + + return test_case_dir + + +@_beartype.beartype +def load_test_case(dir: str) -> Tuple[bytes, Any, Any]: + """Load a self contained ONNX test case from a directory. + + The test case must contain the model and the inputs/outputs data. The directory structure + should be as follows: + + dir + ├── test_ + │ ├── model.onnx + │ └── test_data_set_0 + │ ├── input_0.pb + │ ├── input_1.pb + │ ├── output_0.pb + │ └── output_1.pb + + Args: + dir: The directory containing the test case. + + Returns: + model_bytes: The ONNX model in bytes. + inputs: the inputs data, mapping from input name to numpy.ndarray. + outputs: the outputs data, mapping from output name to numpy.ndarray. + """ + try: + import onnx + from onnx import numpy_helper + except ImportError as exc: + raise ImportError( + "Load test case from ONNX format failed: Please install ONNX." + ) from exc + + with open(os.path.join(dir, "model.onnx"), "rb") as f: + model_bytes = f.read() + + test_data_dir = os.path.join(dir, "test_data_set_0") + + inputs = {} + input_files = glob.glob(os.path.join(test_data_dir, "input_*.pb")) + for input_file in input_files: + tensor = onnx.load_tensor(input_file) # type: ignore[attr-defined] + inputs[tensor.name] = numpy_helper.to_array(tensor) + outputs = {} + output_files = glob.glob(os.path.join(test_data_dir, "output_*.pb")) + for output_file in output_files: + tensor = onnx.load_tensor(output_file) # type: ignore[attr-defined] + outputs[tensor.name] = numpy_helper.to_array(tensor) + + return model_bytes, inputs, outputs + + +@_beartype.beartype +def export_data(data, value_info_proto, f: str) -> None: + """Export data to ONNX protobuf format. + + Args: + data: The data to export, nested data structure of numpy.ndarray. + value_info_proto: The ValueInfoProto of the data. The type of the ValueInfoProto + determines how the data is stored. + f: The file to write the data to. + """ + try: + from onnx import numpy_helper + except ImportError as exc: + raise ImportError( + "Export data to ONNX format failed: Please install ONNX." 
+ ) from exc + + with open(f, "wb") as opened_file: + if value_info_proto.type.HasField("map_type"): + opened_file.write( + numpy_helper.from_dict(data, value_info_proto.name).SerializeToString() + ) + elif value_info_proto.type.HasField("sequence_type"): + opened_file.write( + numpy_helper.from_list(data, value_info_proto.name).SerializeToString() + ) + elif value_info_proto.type.HasField("optional_type"): + opened_file.write( + numpy_helper.from_optional( + data, value_info_proto.name + ).SerializeToString() + ) + else: + assert value_info_proto.type.HasField("tensor_type") + opened_file.write( + numpy_helper.from_array(data, value_info_proto.name).SerializeToString() + ) + + +@_beartype.beartype +def _export_file( + model_bytes: bytes, + f: Union[io.BytesIO, str], + export_type: str, + export_map: Mapping[str, bytes], +) -> None: + """export/write model bytes into directory/protobuf/zip""" + # TODO(titaiwang) MYPY asks for os.PathLike[str] type for parameter: f, + # but beartype raises beartype.roar.BeartypeDecorHintNonpepException, + # as os.PathLike[str] uncheckable at runtime + if export_type == _exporter_states.ExportTypes.PROTOBUF_FILE: + assert len(export_map) == 0 + with torch.serialization._open_file_like(f, "wb") as opened_file: + opened_file.write(model_bytes) + elif export_type in { + _exporter_states.ExportTypes.ZIP_ARCHIVE, + _exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE, + }: + compression = ( + zipfile.ZIP_DEFLATED + if export_type == _exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE + else zipfile.ZIP_STORED + ) + with zipfile.ZipFile(f, "w", compression=compression) as z: + z.writestr(_constants.ONNX_ARCHIVE_MODEL_PROTO_NAME, model_bytes) + for k, v in export_map.items(): + z.writestr(k, v) + elif export_type == _exporter_states.ExportTypes.DIRECTORY: + if isinstance(f, io.BytesIO) or not os.path.isdir(f): # type: ignore[arg-type] + raise ValueError( + f"f should be directory when export_type is set to DIRECTORY, instead get type(f): {type(f)}" + ) + if not os.path.exists(f): # type: ignore[arg-type] + os.makedirs(f) # type: ignore[arg-type] + + model_proto_file = os.path.join(f, _constants.ONNX_ARCHIVE_MODEL_PROTO_NAME) # type: ignore[arg-type] + with torch.serialization._open_file_like(model_proto_file, "wb") as opened_file: + opened_file.write(model_bytes) + + for k, v in export_map.items(): + weight_proto_file = os.path.join(f, k) # type: ignore[arg-type] + with torch.serialization._open_file_like( + weight_proto_file, "wb" + ) as opened_file: + opened_file.write(v) + else: + raise ValueError("Unknown export type") + + +@_beartype.beartype +def _add_onnxscript_fn( + model_bytes: bytes, + custom_opsets: Mapping[str, int], +) -> bytes: + """Insert model-included custom onnx-script function into ModelProto""" + # TODO(titaiwang): remove this when onnx becomes dependency + try: + import onnx + except ImportError as e: + raise errors.OnnxExporterError("Module onnx is not installed!") from e + + # For > 2GB model, onnx.load_fromstring would fail. However, because + # in _export_onnx, the tensors should be saved separately if the proto + # size > 2GB, and if it for some reason did not, the model would fail on + # serialization anyway in terms of the protobuf limitation. So we don't + # need to worry about > 2GB model getting here. 
+ model_proto = onnx.load_model_from_string(model_bytes) # type: ignore[attr-defined] + + # Iterate graph nodes to insert only the included custom + # function_proto into model_proto + # TODO(titaiwang): Currently, onnxscript doesn't support ONNXFunction + # calling other ONNXFunction scenario, neither does it here + onnx_function_list = list() # type: ignore[var-annotated] + included_node_func = set() # type: Set[str] + # onnx_function_list and included_node_func are expanded in-place + _find_onnxscript_op( + model_proto.graph, included_node_func, custom_opsets, onnx_function_list + ) + + if onnx_function_list: + model_proto.functions.extend(onnx_function_list) + model_bytes = model_proto.SerializeToString() + return model_bytes + + +@_beartype.beartype +def _find_onnxscript_op( + graph_proto, + included_node_func: Set[str], + custom_opsets: Mapping[str, int], + onnx_function_list: List, +): + """Recursively iterate ModelProto to find ONNXFunction op as it may contain control flow Op.""" + for node in graph_proto.node: + node_kind = node.domain + "::" + node.op_type + # Recursive needed for control flow nodes: IF/Loop which has inner graph_proto + for attr in node.attribute: + if attr.g is not None: + _find_onnxscript_op( + attr.g, included_node_func, custom_opsets, onnx_function_list + ) + # Only custom Op with ONNX function and aten with symbolic_fn should be found in registry + onnx_function_group = registration.registry.get_function_group(node_kind) + # Ruled out corner cases: onnx/prim in registry + if ( + node.domain + and not jit_utils.is_aten(node.domain) + and not jit_utils.is_prim(node.domain) + and not jit_utils.is_onnx(node.domain) + and onnx_function_group is not None + and node_kind not in included_node_func + ): + specified_version = custom_opsets.get(node.domain, 1) + onnx_fn = onnx_function_group.get(specified_version) + if onnx_fn is not None: + # TODO(titaiwang): to_function_proto is onnx-script API and can be annotated + # after onnx-script is dependency + if hasattr(onnx_fn, "to_function_proto"): + onnx_function_proto = onnx_fn.to_function_proto() # type: ignore[attr-defined] + onnx_function_list.append(onnx_function_proto) + included_node_func.add(node_kind) + continue + + raise errors.UnsupportedOperatorError( + node_kind, + specified_version, + onnx_function_group.get_min_supported() + if onnx_function_group + else None, + ) + return onnx_function_list, included_node_func diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/onnxruntime.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/onnxruntime.py new file mode 100644 index 0000000000000000000000000000000000000000..061e7fb38656be691760178022986c148b7e862f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/onnxruntime.py @@ -0,0 +1,1048 @@ +import dataclasses +import importlib +import logging + +from typing import ( + Any, + Dict, + Final, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +from typing_extensions import TypeAlias + +import torch +import torch._C +import torch._ops +import torch._prims.executor +import torch.fx +from torch._subclasses.fake_tensor import FakeTensor +from torch.fx._compatibility import compatibility +from torch.fx.passes.fake_tensor_prop import FakeTensorProp +from torch.fx.passes.operator_support import OperatorSupport +from torch.fx.passes.tools_common import CALLABLE_NODE_OPS +from torch.utils import _pytree + +try: + # Use try-except to initialize package-dependent global variables. 
+ import onnx + import onnxruntime # type: ignore[import] + from onnxruntime.capi import _pybind_state as ORTC # type: ignore[import] + + # This is not use directly in DORT but needed by underlying exporter, + # so we still need to check if it exists. + importlib.import_module("onnxscript") + + import torch.onnx + import torch.onnx._internal + import torch.onnx._internal.diagnostics + import torch.onnx._internal.exporter + import torch.onnx._internal.fx.decomposition_table + import torch.onnx._internal.fx.passes + from torch.onnx._internal.fx import fx_onnx_interpreter + from torch.onnx._internal.fx.type_utils import ( + _TORCH_DTYPE_TO_NUMPY_DTYPE, + _TORCH_DTYPE_TO_ONNX_TENSOR_ELEMENT_TYPE, + ) + + _SUPPORT_ONNXRT = True +except ImportError: + _SUPPORT_ONNXRT = False + +__all__ = [ + "is_onnxrt_backend_supported", + "torch_compile_backend", + "OrtExecutionProvider", + "OrtBackendOptions", + "OrtBackend", +] + + +def is_onnxrt_backend_supported() -> bool: + """Returns ``True`` if ONNX Runtime dependencies are installed and usable + to support TorchDynamo backend integration; ``False`` otherwise. + + Example:: + + # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> import torch + >>> if torch.onnx.is_onnxrt_backend_supported(): + ... @torch.compile(backend="onnxrt") + ... def f(x): + ... return x * x + ... print(f(torch.randn(10))) + ... else: + ... print("pip install onnx onnxscript onnxruntime") + ... + """ + return _SUPPORT_ONNXRT + + +def _infer_default_eps() -> Sequence[str]: + # TODO: select a good default based on the capabilities of the host + # e.g. DML on Windows, etc. + return ["CPUExecutionProvider"] + + +def _nvtx_range_push(name: str): + """If PyTorch is installed with CUDA support, this starts NVTX range. + + Check torch.cuda.nvtx.range_push's document for more details. + """ + if torch.cuda.is_available(): + torch.cuda.nvtx.range_push(name) + + +def _nvtx_range_pop(): + """If PyTorch is installed with CUDA support, this terminates NVTX range. + + Check torch.cuda.nvtx.range_pop's document for more details. + """ + if torch.cuda.is_available(): + torch.cuda.nvtx.range_pop() + + +def _get_ort_device_type(device_type: str): + if device_type == "cuda": + return ORTC.OrtDevice.cuda() + if device_type == "cpu": + return ORTC.OrtDevice.cpu() + # ort pytorch device is mapped to NPU OrtDevice type + if device_type == "ort": + return ORTC.OrtDevice.npu() + raise ValueError("Unsupported device type: " + device_type) + + +logger = logging.getLogger(__name__) +# Uncomment the following lines to print out development info. +# logging.basicConfig(level=logging.WARNING) +# logger.setLevel(logging.WARNING) + + +class OrtOperatorSupport(OperatorSupport): + """Operator support for ONNXRuntime backend. + + It has two-level of support decision. One is via support_dict and the other one + is via extra_support_dict. The logic of using support_dict is implemented in + OrtOperatorSupport and extra_support_dict is used by OperatorSupport.is_node_supported. + """ + + def __init__(self, support_dict: Set[Any], extra_support_dict: Dict[str, Any]): + # Use extra_support_dict[op_name] = None to indicate + # we support op_name with all input types. Otherwise, + # see support_dict (type: SupportDict) in operator_support.py + # for specifying supported types. 
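+        # For example, an entry like extra_support_dict = {"getattr": None} would
+        # declare "getattr" nodes supported regardless of their input types.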
+        super().__init__(extra_support_dict)
+        self._onnx_support_dict = support_dict
+
+    def is_node_supported(
+        self, submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node
+    ) -> bool:
+        # OperatorSupport.is_node_supported returns True for non-callable nodes.
+        # Since ORT can't execute them, we return False here to override the base
+        # behavior.
+        if node.op not in CALLABLE_NODE_OPS:
+            return False
+        # This is the only place to decide if an aten op is supported.
+        if node.op == "call_function" and node.target in self._onnx_support_dict:
+            logger.warning(
+                "support_dict supports node.target: %s (type: %s)",
+                node.target,
+                type(node.target),
+            )
+            return True
+        logger.warning(
+            "support_dict doesn't support node.target: %s (type: %s)",
+            node.target,
+            type(node.target),
+        )
+        # If node.target is not in support_dict, we still want to check if torch.jit.script
+        # can convert it to an ONNX equivalent. Let's use the base mechanism to do this.
+        # See extra_support_dict for supported ops.
+        if super().is_node_supported(submodules, node):
+            logger.warning(
+                "extra_support_dict supports node.target: %s (type: %s)",
+                node.target,
+                type(node.target),
+            )
+            return True
+        logger.warning(
+            "extra_support_dict doesn't support node.target: %s (type: %s)",
+            node.target,
+            type(node.target),
+        )
+        return False
+
+
+def _move_placeholder_to_front(graph_module: torch.fx.GraphModule) -> None:
+    """
+    In torch.fx.Graph, placeholder is a special assignment node. If it's not
+    executed in the beginning, it could overwrite values computed by upstream
+    nodes.
+    """
+
+    graph = graph_module.graph
+    placeholders = []
+    first_not_placeholder = None
+    for node in graph.nodes:
+        if node.op == "placeholder":
+            placeholders.append(node)
+        if first_not_placeholder is None and node.op != "placeholder":
+            first_not_placeholder = node
+    if first_not_placeholder is None:
+        return
+    for placeholder in placeholders:
+        first_not_placeholder.prepend(placeholder)
+
+
+def _replace_to_copy_with_to(fx_module: torch.fx.GraphModule) -> None:
+    # aten._to_copy doesn't have an exporter, so we replace it with aten.to.
+    for node in fx_module.graph.nodes:
+        if (
+            isinstance(node.target, torch._ops.OpOverload)
+            and node.target.overloadpacket == torch.ops.aten._to_copy
+        ):
+            is_default_layout = True
+            is_on_same_device = True
+            is_cast = True
+            are_kwargs_supported = True
+            if "layout" in node.kwargs and node.kwargs["layout"] != torch.strided:
+                is_default_layout = False
+            if (
+                "device" in node.kwargs
+                and node.kwargs["device"] != node.args[0].meta["val"].device
+            ):
+                is_on_same_device = False
+            if "dtype" not in node.kwargs:
+                is_cast = False
+            for kwarg in node.kwargs:
+                if kwarg not in ["layout", "device", "dtype"]:
+                    are_kwargs_supported = False
+
+            if (
+                len(node.args) == 1
+                and is_default_layout
+                and is_on_same_device
+                and is_cast
+                and are_kwargs_supported
+            ):
+                # This aten::_to_copy looks like ONNX Cast, so other kwargs are ignored.
+                # This change could lead to an invalid FX graph, but it doesn't matter, as long as the downstream backend,
+                # ONNXRuntime, can execute the exported ONNX graph.
+                node.kwargs = {"dtype": node.kwargs["dtype"]}
+
+                node.target = torch.ops.aten.to.dtype
+            else:
+                raise RuntimeError(
+                    f"aten._to_copy must be replaced with other ONNX-supported aten ops. 
\ + args={[arg.meta for arg in node.args]}, kwargs={node.kwargs}" + ) + fx_module.recompile() + + +def _infer_ep_from_device(*args) -> Tuple[str, ...]: + """Return the first valid device (i.e., GPU or CPU) in argument list.""" + eps = [] + for arg in args: + if hasattr(arg, "device"): + device = arg.device + if device.type == "cuda": + eps.append("CUDAExecutionProvider") + elif device.type == "cpu": + eps.append("CPUExecutionProvider") + return tuple(eps) + + +def _extract_graph_module_inputs(graph_module: torch.fx.GraphModule) -> Tuple[Any, ...]: + placeholders = [] + for node in graph_module.graph.nodes: + if node.op == "placeholder": + if hasattr(node, "meta") and "val" in node.meta: + assert isinstance(node.meta["val"], torch.Tensor) + placeholders.append(node) + return tuple(placeholders) + + +def _extract_graph_module_outputs(graph_module: torch.fx.GraphModule) -> Any: + """Collect "val" fields from outputs metadata in this torch.fx.GraphModule.""" + for node in graph_module.graph.nodes: + if node.op == "output": + # Output node is unique. Let's retrieve output values from + # this node's input list. And then just return. + return node.args[0] + raise ValueError("No output node found in this torch.fx.GraphModule.") + + +def _infer_ep_from_graph_module(graph_module: torch.fx.GraphModule) -> Tuple[str, ...]: + """Return the all valid devices (i.e., GPU or CPU) among outputs of this torch.fx.GraphModule.""" + flattened_output_args, _ = _pytree.tree_flatten( + _extract_graph_module_outputs(graph_module) + ) + # Output arguments with example value (type: torch.Tensor) in the `graph_module`. + selected_output_args = [ + output_arg.meta["val"] + for output_arg in flattened_output_args + # output_arg must have tensor for its device information. + # Otherwise, skip it. + if (hasattr(output_arg, "meta") and "val" in output_arg.meta) + ] + return _infer_ep_from_device(*selected_output_args) + + +def _sort_eps(eps: Tuple[str, ...]) -> Tuple[str, ...]: + """Sort execution providers in eps based on pre-set priority.""" + + def get_execution_provider_priority(ep: str) -> int: + if ep == "CPUExecutionProvider": + # Lowest priority. + return 2 + if ep == "CUDAExecutionProvider": + # Higher priority than CPU but lower than + # other specialized EPs. + return 1 + # Highest priority. + return 0 + + unique_eps = set(eps) + return tuple(sorted(unique_eps, key=get_execution_provider_priority, reverse=True)) + + +def _get_onnx_devices(values: Tuple[torch.Tensor, ...]) -> Tuple["ORTC.OrtDevice", ...]: + assert all( + value.device == values[0].device for value in values + ), "All values must be on the same device." + + def _device_id_or_zero(device_id: int) -> int: + return device_id or 0 + + devices: Tuple["ORTC.OrtDevice", ...] = tuple( + ORTC.OrtDevice( + _get_ort_device_type(value.device.type), + ORTC.OrtDevice.default_memory(), + _device_id_or_zero(value.device.index), + ) + for value in values + ) + return devices + + +def _get_ortvalues_from_torch_tensors( + tensors: Tuple[torch.Tensor, ...], devices: Tuple["ORTC.OrtDevice", ...] 
+) -> Tuple[torch.Tensor, ...]: + ortvalues = ORTC.OrtValueVector() + ortvalues.reserve(len(tensors)) + dtypes = [] + shapes = [] + data_ptrs = [] + + for tensor in tensors: + dtypes.append(_TORCH_DTYPE_TO_NUMPY_DTYPE[tensor.dtype]) + shapes.append(tensor.size()) + data_ptrs.append(tensor.data_ptr()) + ortvalues.push_back_batch(tensors, data_ptrs, dtypes, shapes, devices) + return ortvalues + + +def _to_real_tensor(tensor: FakeTensor) -> torch.Tensor: + if tensor.is_sparse: + raise ValueError("sparse tensor is not yet supported.") + out = torch.empty(tensor.size(), dtype=tensor.dtype, device=tensor.device) + return out + + +def _run_onnx_session_with_ortvaluevector( + sess: "onnxruntime.InferenceSession", + input_names: Tuple[str, ...], + inputs: Tuple[torch.Tensor, ...], + input_devices: Tuple["ORTC.OrtDevice", ...], + output_names: Tuple[str, ...], + outputs: Tuple[torch.Tensor, ...], + output_devices: Tuple["ORTC.OrtDevice", ...], + preallocate_output: bool, +) -> Tuple[torch.Tensor, ...]: + _nvtx_range_push("contiguous") + inputs = tuple(a.contiguous() for a in inputs) + _nvtx_range_pop() + + _nvtx_range_push("push_back_batch") + + ort_inputs = _get_ortvalues_from_torch_tensors(inputs, input_devices) + + # preallocate output pytorch Tensors and use the buffers affined to the torch device for the output ortvalue. + # Because the output ortvalue is not allocated and owned by ort, it does not need to convert the output ortvalue + # to torch Tensor transferring the ownership. + if preallocate_output: + pth_outputs = tuple( + _to_real_tensor(t) if isinstance(t, FakeTensor) else t for t in outputs + ) + ort_outputs = _get_ortvalues_from_torch_tensors(pth_outputs, output_devices) + else: + ort_outputs = ORTC.OrtValueVector() + _nvtx_range_pop() + + _nvtx_range_push("run_with_ortvaluevector") + run_options = onnxruntime.RunOptions() + run_options.add_run_config_entry("disable_synchronize_execution_providers", "1") + sess.run_with_ortvaluevector( + run_options, input_names, ort_inputs, output_names, ort_outputs, output_devices + ) + _nvtx_range_pop() + + if preallocate_output: + return pth_outputs + else: + _nvtx_range_push("after run_with_ortvaluevector") + pth_outputs = onnxruntime.training.ortmodule._utils._ortvalues_to_torch_tensor( + ort_outputs + ) + _nvtx_range_pop() + return pth_outputs + + +def _run_onnx_session_with_fetch( + sess: "onnxruntime.InferenceSession", + input_names: Tuple[str, ...], + inputs: Tuple[torch.Tensor, ...], + input_devices: Tuple["ORTC.OrtDevice", ...], + output_names: Tuple[str, ...], + outputs: Tuple[torch.Tensor, ...], + output_devices: Tuple["ORTC.OrtDevice", ...], + preallocate_output: bool, +) -> Tuple[torch.Tensor, ...]: + feed = { + name: onnxruntime.OrtValue.ortvalue_from_numpy(tensor.cpu().numpy()) + for name, tensor in zip(input_names, inputs) + } + ort_outputs = sess.run(output_names, feed) + pth_outputs = tuple( + torch.from_numpy(value).to(tensor.device) + for value, tensor in zip(ort_outputs, outputs) + ) + return pth_outputs + + +class OrtExecutionInfoPerSession: + """Information required to execute torch.fx.GraphModule using onnxruntime.InferenceSession""" + + def __init__( + self, + session: "onnxruntime.InferenceSession", + input_names: Tuple[str, ...], + input_value_infos: Tuple["onnx.ValueInfoProto", ...], # type: ignore[name-defined] + output_names: Tuple[str, ...], + output_value_infos: Tuple["onnx.ValueInfoProto", ...], # type: ignore[name-defined] + input_devices: Tuple["ORTC.OrtDevice", ...], + output_devices: Tuple["ORTC.OrtDevice", 
...], + example_outputs: Union[Tuple[torch.Tensor, ...], torch.Tensor], + ): + # Carrier of ONNX model and its executor. + self.session: onnxruntime.InferenceSession = session + # For the ONNX model stored in self.session, self.input_names[i] is the + # name of the i-th positional input. + self.input_names: Tuple[str, ...] = input_names + # self.input_name[i]'s type information is stored in self.input_value_infos[i]. + self.input_value_infos: Tuple[onnx.ValueInfoProto, ...] = input_value_infos # type: ignore[name-defined] + # Similar to self.input_names, but for outputs. + self.output_names: Tuple[str, ...] = output_names + # Similar to self.input_value_infos but for outputs. + self.output_value_infos: Tuple[onnx.ValueInfoProto, ...] = output_value_infos # type: ignore[name-defined] + # For the ONNX model stored in self.session, self.input_devices[i] is the + # i-th positional input's device. + self.input_devices: Tuple["ORTC.OrtDevice", ...] = input_devices + # Similar to self.input_devices, but for outputs. + self.output_devices: Tuple["ORTC.OrtDevice", ...] = output_devices + # This is the outputs of executing the original torch.fx.GraphModule with example inputs + # (i.e., args passed into OrtBackend._ort_acclerated_call). + self.example_outputs: Union[ + Tuple[torch.Tensor, ...], torch.Tensor + ] = example_outputs + + def is_supported(self, *args): + # Compare the args and the input schema in ONNX model and + # return the first match. + if len(args) != len(self.input_value_infos): + return False + for arg, value_info in zip(args, self.input_value_infos): + if not isinstance(arg, torch.Tensor): + return False + onnx_dtype = _TORCH_DTYPE_TO_ONNX_TENSOR_ELEMENT_TYPE[arg.dtype] + if onnx_dtype != value_info.type.tensor_type.elem_type: + return False + for dim, onnx_dim in zip(arg.shape, value_info.type.tensor_type.shape.dim): + if isinstance(dim, int) and ( + onnx_dim.dim_value == dim or onnx_dim.dim_param + ): + continue + elif isinstance(dim, torch.SymInt) and onnx_dim.dim_param: + continue + else: + return False + return True + + +@dataclasses.dataclass +class OrtExecutionInfoForAllGraphModules: + def __init__(self): + # All sessions (and their related information) created by exporting the same GraphModule + # with different inputs. + self.execution_info_per_graph_module: Dict[ + torch.fx.GraphModule, List[OrtExecutionInfoPerSession] + ] = {} + + def search_reusable_session_execution_info( + self, graph_module: torch.fx.GraphModule, *args + ): + if graph_module not in self.execution_info_per_graph_module: + return None + # All execution information for ONNX models exported from the same `graph_module` + # with different inputs. + candidates = self.execution_info_per_graph_module[graph_module] + + for candidate in candidates: + if candidate.is_supported(*args): + # Returns the first session that accepts this input schema. + return candidate + # No reusable session found. + return None + + def cache_session_execution_info( + self, graph_module: torch.fx.GraphModule, info: OrtExecutionInfoPerSession + ): + if graph_module not in self.execution_info_per_graph_module: + self.execution_info_per_graph_module[graph_module] = [info] + else: + self.execution_info_per_graph_module[graph_module].append(info) + + +OrtExecutionProvider: TypeAlias = Union[str, Tuple[str, Mapping[str, Any]]] +"""Either the name of an ONNX Runtime execution provider as a string or +a 2-tuple of the name and a dictionary of execution provider options. 
+ +Examples:: + + >>> "CPUExecutionProvider" + + >>> ("CUDAExecutionProvider", {"device_id": 3}) + +""" + + +@dataclasses.dataclass(frozen=True) +@compatibility(is_backward_compatible=False) +class OrtBackendOptions: + """Options for constructing an ``OrtBackend``, the ONNX Runtime + backend (``"onnxrt"``) for ``torch.compile``. + + Example:: + + >>> @torch.compile( + ... backend="onnxrt", + ... options=torch.onnx._OrtBackendOptions(...), + ... ) + ... def ort_function(x): + ... return x ** x + """ + + preferred_execution_providers: Optional[Sequence[OrtExecutionProvider]] = None + """An optional sequence of execution providers to be prioritized ahead of any + execution providers that may be inferred (see ``infer_execution_providers``). + """ + + infer_execution_providers: bool = True + """Whether to infer an execution provider from ``torch.device`` bound to inputs or found in the graph.""" + + default_execution_providers: Optional[Sequence[OrtExecutionProvider]] = None + """The default fallback execution providers. If not specified, one will be + be selected based on the host environment (most likely ``"CPUExecutionProvider"``). + """ + + # preallocate_output allows for allocating output torch Tensor buffers and feeding them to InferenceSession + # in order to avoid internal allocation of output buffers in InferenceSession. + # If output ortvalue returned from InferenceSession is allocated internally, + # it needs to be converted to torch Tensor for return, and the torch Tensor should hold the ownership. + # When a custom torch device is used with a custom aten allocator, the conversion from ortvalue to torch Tensor + # should be supported, which is currently done through dlpack. Note that dlpack might not support a custom torch device. + # It can be avoided by allowing for preallocation for output buffers allocated by a custom aten allocator, + # and use the preallocated output buffers for InferenceSession not holding any ownership for them. + # TODO(wschin): Make it to inference session level flag. + # See https://github.com/pytorch/pytorch/issues/106869. + preallocate_output: bool = False + """If ``True``, allocate memory for ONNX Runtime's outputs on the PyTorch side.""" + + use_aot_autograd: bool = True + """Whether to wrap the ``OrtBackend`` with TorchDynamo's aot_autograd backend + to support training (i.e., backward graphs are also sent to ``OrtBackend``). + + Symbolic execution is used to capture the forward pass and backward passes as a single graph. + Then, a selected graph partition algorithm (``min_cut_rematerialization_partition``) is used + to split the entire graph into forward sub-graph and backward sub-graph. Finally, both + sub-graphs are compiled by ``OrtBackend``. + """ + + export_options: Optional["torch.onnx.ExportOptions"] = None + """Options for the TorchDynamo-based ONNX exporter used by the ``OrtBackend``.""" + + ort_session_options: Optional["onnxruntime.SessionOptions"] = None + """Options for the ``onnxruntime.InferenceSession`` used by the ``OrtBackend``.""" + + +@compatibility(is_backward_compatible=False) +class OrtBackend: + """A backend compiles (sub-)graphs in torch.fx.GraphModule to onnxruntime.InferenceSession calls. + + The compiler entry point is OrtBackend.compile, which + 1. partitions the original graph into supported sub-graphs (type: torch.fx.GraphModule) and unsupported + sub-graphs. + 2. For each supported sub-graph, it replaces its _wrapped_call function with _ort_accelerated_call. + 3. 
Inside _ort_accelerated_call, it creates onnxruntime.InferenceSession and calls it to execute the sub-graph.
+    """
+
+    def __init__(self, options: Optional[OrtBackendOptions] = None):
+        self._options: Final = OrtBackendOptions() if options is None else options
+
+        # options.export_options contains information shared between exporter and DORT.
+        # For example, they should use the same decomposition table when
+        #  1. capturing FX graph in torch.compile (see how we create aot_ort in register_backend.py)
+        #  2. calling exporter's API to convert `torch.fx.GraphModule` to ONNX model
+        #     (see onnxfunction_dispatcher passed to FxOnnxInterpreter.run below).
+        #
+        # Convert user-facing option to internal option used by ONNX exporter
+        # to access required information.
+        # Some useful fields:
+        # - Decomposition table for decomposing FX operators in exporter is
+        #   self._resolved_onnx_exporter_options.decomposition_table.
+        # - self._resolved_onnx_exporter_options.onnx_registry records what
+        #   aten/prim ops are supported by exporter and their exporters (type: callable).
+        self._resolved_onnx_exporter_options = (
+            torch.onnx._internal.exporter.ResolvedExportOptions(
+                torch.onnx.ExportOptions()
+                if self._options.export_options is None
+                else self._options.export_options
+            )
+        )
+
+        # Given DORT's computation flow:
+        #  1. OrtOperatorSupport uses support_dict and extra_support_dict to select operators
+        #     and send them to DORT.
+        #  2. Then, DORT exports the selected sub-graphs into ONNX.
+        #  3. Finally, DORT calls ORT to do the computation.
+        # OrtOperatorSupport and create_onnx_friendly_decomposition_table(...)
+        # must use the same support_dict. If the support_dict here contains something not
+        # supported by the exporter, the exporter will fail in step 2 since the selected graphs may
+        # contain unsupported operators such as aten::_who_you_are.
+        # This restriction is automatically satisfied since DORT and the exporter share the same
+        # self._resolved_onnx_exporter_options.
+        support_dict = torch.onnx._internal.fx.decomposition_table._create_onnx_supports_op_overload_table(
+            self._resolved_onnx_exporter_options.onnx_registry
+        )
+
+        extra_support_dict: Dict[str, Any] = {
+            "getattr": None,
+            "_operator.getitem": None,
+        }
+
+        self._supported_ops = OrtOperatorSupport(support_dict, extra_support_dict)
+        # TODO(wschin): this is a naive implementation of cache without proper guard
+        # See https://github.com/pytorch/pytorch/issues/106868.
+        self._partitioner_cache: Dict[torch.fx.GraphModule, torch.fx.GraphModule] = {}
+        # Conceptually, this field is a 2-layer dictionary
+        #   GraphModule 0
+        #     ONNX Model 0 (with ORT InferenceSession and related information. type: OrtExecutionInfoPerSession)
+        #     ONNX Model 1
+        #     ...
+        #   GraphModule 1
+        #     ONNX Model 2 (with ORT InferenceSession and related information. type: OrtExecutionInfoPerSession)
+        #     ONNX Model 3
+        #     ...
+        #   ...
+        # which caches all previous compilation results so that we can reuse them.
+        # ONNX Model 0 and 1 are exported from the same GraphModule 0 but with different inputs
+        # (e.g., tensors with different ranks). GraphModule 0 and GraphModule 1 are different
+        # graphs captured by Dynamo and sent to OrtBackend.compile.
+        self._all_ort_execution_info = OrtExecutionInfoForAllGraphModules()
+
+        self._assert_allclose_to_baseline = False
+
+        self.execution_count = 0
+
+        # Function which invokes ORT to do the real computation.
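+        # _run_onnx_session_with_ortvaluevector binds inputs/outputs as an
+        # OrtValueVector directly over torch tensors when the ORTC pybind
+        # module exposes push_back_batch; otherwise we fall back to
+        # _run_onnx_session_with_fetch, which round-trips through numpy.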
+        self.run = (
+            _run_onnx_session_with_ortvaluevector
+            if hasattr(ORTC, "push_back_batch")
+            else _run_onnx_session_with_fetch
+        )
+
+    def _select_eps(
+        self, graph_module: torch.fx.GraphModule, *args
+    ) -> Sequence[Tuple[str, Mapping[str, Any]]]:
+        inferred_eps: Tuple[str, ...] = tuple()
+        if self._options.infer_execution_providers:
+            if eps_from_args := _infer_ep_from_device(*args):
+                # If the user feeds a CUDA tensor as an input argument,
+                # we want to use the CUDA EP.
+                # Thus, `eps_from_args` (deduced from input arguments)
+                # has the highest priority.
+                inferred_eps = eps_from_args
+            elif eps_from_graph_module := _infer_ep_from_graph_module(graph_module):
+                # If there is no EP in the input arguments, we deduce the EP from
+                # graph_module's outputs. Those outputs may come from
+                # FakeTensorProp or Dynamo's built-in symbolic shape inference.
+                inferred_eps = eps_from_graph_module
+
+        selected_eps = []
+
+        for ep in (
+            *(self._options.preferred_execution_providers or []),
+            *_sort_eps(inferred_eps),
+            *(self._options.default_execution_providers or _infer_default_eps()),
+        ):
+            if isinstance(ep, str):
+                ep = (ep, {})
+            elif isinstance(ep, tuple) and ep[1] is None:
+                ep = (ep[0], {})
+            if ep is not None and ep not in selected_eps:
+                selected_eps.append(ep)
+
+        return selected_eps
+
+    def _ort_acclerated_call(self, graph_module: torch.fx.GraphModule, *args, **kwargs):
+        """This function replaces GraphModule._wrapped_call in compiled model.
+
+        The _wrapped_call is the underlying implementation of the forward method. Replacing
+        it means we delegate the computation to _ort_acclerated_call and therefore
+        onnxruntime.InferenceSession.
+        """
+        cached_execution_info_per_session = (
+            self._all_ort_execution_info.search_reusable_session_execution_info(
+                graph_module, *args
+            )
+        )
+        if cached_execution_info_per_session:
+            onnx_session = cached_execution_info_per_session.session
+            input_names = cached_execution_info_per_session.input_names
+            output_names = cached_execution_info_per_session.output_names
+            input_devices = cached_execution_info_per_session.input_devices
+            output_devices = cached_execution_info_per_session.output_devices
+            prim_outputs = cached_execution_info_per_session.example_outputs
+        else:
+            # It's the first time we see such a graph. Let's make a new session
+            # (type: onnxruntime.InferenceSession) for it.
+
+            graph_module = torch.onnx._internal.fx.passes.MovePlaceholderToFront(
+                self._resolved_onnx_exporter_options.diagnostic_context,
+                graph_module,
+            ).run()
+            # Generate reference outputs. They are used to indicate output
+            # tensors' types and devices when calling ORT.
+            #
+            # WARNING: The downstream code should not change prim_outputs, and
+            # this backend should always produce output with a schema identical to prim_outputs'.
+
+            if self._resolved_onnx_exporter_options.dynamic_shapes:
+                # No pre-allocation when dynamic shape is enabled.
+                self.preallocate_output = False
+                extracted_outputs = _extract_graph_module_outputs(graph_module)
+
+                def maybe_map_to_meta_val(value):
+                    if hasattr(value, "meta") and "val" in value.meta:
+                        # Select outputs with "val" information. Without "val",
+                        # it's not possible to access output_arg.meta["val"].device.
+                        return value.meta["val"]
+                    else:
+                        return value
+
+                prim_outputs = _pytree.tree_map(
+                    maybe_map_to_meta_val, extracted_outputs
+                )
+            else:
+                try:
+                    prim_outputs = FakeTensorProp(graph_module).propagate(
+                        *args, **kwargs
+                    )
+                except Exception:
+                    logger.warning("FakeTensorProp failed for %s", graph_module)
+                    # When FakeTensorProp fails, it is not possible to preallocate output buffers
+                    # because the output shapes are not inferred.
+                    self.preallocate_output = False
+
+                    # Rethrow the FakeTensorProp failure because it is not yet handled.
+                    raise
+
+            # Create the object to iterate through the nodes in the graph one by one
+            # and call the corresponding ONNX exporter for each node.
+            fx_interpreter = fx_onnx_interpreter.FxOnnxInterpreter(
+                diagnostic_context=self._resolved_onnx_exporter_options.diagnostic_context
+            )
+            # Cast FX variables if they will result in a schema mismatch when searching
+            # for an ONNX operator. E.g., add(double_tensor, int_tensor) is fine in PyTorch,
+            # but ONNX expects add(double_tensor, double_tensor).
+            graph_module = torch.onnx._internal.fx.passes.InsertTypePromotion(
+                self._resolved_onnx_exporter_options.diagnostic_context, graph_module
+            ).run()
+            # Start the per-node exporting process. It's conceptually a for loop
+            # scanning through the nodes in the graph.
+            exported = fx_interpreter.run(
+                fx_graph_module=graph_module,
+                onnxfunction_dispatcher=self._resolved_onnx_exporter_options.onnxfunction_dispatcher,
+                op_level_debug=self._resolved_onnx_exporter_options.op_level_debug,
+            )
+            # Convert the exported result to ONNX ModelProto.
+            onnx_model = exported.to_model_proto(
+                opset_version=self._resolved_onnx_exporter_options.onnx_registry.opset_version,
+            )
+
+            # Initialize an ORT session to execute this ONNX model.
+            # Note that TorchDynamo assumes all inputs/outputs are on the
+            # same device, but it's subject to change (very likely with
+            # dynamic shape support), so we add execution providers
+            # based on the logic in _select_eps: (explicitly preferred EPs,
+            # EPs inferred from inputs or graph, and the fallback default EP).
+            #
+            # TODO(wschin): enable external allocators.
+            # See https://github.com/pytorch/pytorch/issues/106867
+            onnx_session = onnxruntime.InferenceSession(
+                path_or_bytes=onnx_model.SerializeToString(),
+                sess_options=self._options.ort_session_options,
+                providers=self._select_eps(graph_module, *args),
+            )
+
+            # Cache ORT session. It's reused for the same "graph_module".
+            # Generate ONNX model and extract its input and output names.
+            input_names = tuple(input.name for input in onnx_model.graph.input)
+            output_names = tuple(output.name for output in onnx_model.graph.output)
+            input_devices = _get_onnx_devices(args)
+            # Cache devices for inputs and outputs. They are used to invoke
+            # ORT session. 
Output devices indicate where (e.g., GPU or CPU) + # to store outputs + if isinstance(prim_outputs, tuple): + output_devices = _get_onnx_devices(prim_outputs) + else: + output_devices = _get_onnx_devices((prim_outputs,)) + + execution_info_per_session = OrtExecutionInfoPerSession( + session=onnx_session, + input_names=input_names, + input_value_infos=tuple(input for input in onnx_model.graph.input), + output_names=output_names, + output_value_infos=tuple(output for output in onnx_model.graph.output), + input_devices=input_devices, + output_devices=output_devices, + example_outputs=prim_outputs, + ) + + self._all_ort_execution_info.cache_session_execution_info( + graph_module, execution_info_per_session + ) + + self.execution_count += 1 + + # ORT always returns a tuple of outputs. If the original output is a tensor, + # ORT output's first element must be extracted and returned. Otherwise, type + # mismatch may happen in downstream computation. + is_single_tensor_output = isinstance(prim_outputs, torch.Tensor) + normalized_prim_outputs = ( + (prim_outputs,) if is_single_tensor_output else prim_outputs + ) + assert isinstance(normalized_prim_outputs, tuple) + assert all(isinstance(elem, torch.Tensor) for elem in normalized_prim_outputs) + + _nvtx_range_push("run_onnx_session_with_ortvaluevector") + onnx_outputs = self.run( + onnx_session, + input_names, + args, + input_devices, + output_names, + normalized_prim_outputs, + output_devices, + self._options.preallocate_output, + ) + _nvtx_range_pop() + if self._assert_allclose_to_baseline: + # Compute baseline. + baseline_outputs = torch._prims.executor.execute( + graph_module, *args, executor="aten" + ) + normalized_baseline_ouptuts = ( + (baseline_outputs,) if is_single_tensor_output else baseline_outputs + ) + # Ensure every output tensor is close to the corresponding baseline. + for onnx_output, baseline_output in zip( + onnx_outputs, normalized_baseline_ouptuts + ): + torch.testing.assert_close(onnx_output, baseline_output) + return onnx_outputs[0] if is_single_tensor_output else onnx_outputs + + def compile(self, graph_module: torch.fx.GraphModule, args) -> torch.fx.GraphModule: + # Deferred import since CapabilityBasedPartitioner is not decorated with + # @compatibility; importing it at the module level will result in the test + # failing: pytest test/test_fx.py -k test_public_api_surface + # because this module is imported into torch.onnx. + from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner + + # FX graph based partitioning based on ONNX supported ops. + # Given a graph module + # GraphModule0 + # node_0 + # node_1 + # node_2 + # node_3 + # node_4 + # If only node_2 is not supported by ONNX, this graph module will be partitioned into + # GraphModule0 + # GraphModule1 + # node_0 + # node_1 + # node_2 + # GraphModule2 + # node_3 + # node_4 + # by calling CapabilityBasedPartitioner.partition_and_fuse. + # Then, GraphModule1's and GraphModule2's forward method (GraphModule._wrapped_call) + # will be replaced by OrtBackend._ort_accelerated_call to delegate computation to ORT. + if graph_module in self._partitioner_cache: + partitioned_prim_graph_module = self._partitioner_cache[graph_module] + else: + prim_graph_module = graph_module + # TODO(wschin): this is required for removing aten::_to_copy in _replace_to_copy_with_to. + # See https://github.com/pytorch/pytorch/issues/106871. 
+ _replace_to_copy_with_to(prim_graph_module) + partitioner = CapabilityBasedPartitioner( + prim_graph_module, + self._supported_ops, + allows_single_node_partition=True, + ) + partitioned_prim_graph_module = partitioner.partition_and_fuse() + self._partitioner_cache[graph_module] = partitioned_prim_graph_module + + # Overriding fused_module's __call__() function with ort_acclerated_call() + # This loop goes through all graph partitions (each of them is an ONNX-representable graph) + # and override their _wrapped_call function with _ort_accelerated_call. + # Inside _ort_accelerated_call, the partition's graph is exported into ONNX and executed by ORT. + for node in partitioned_prim_graph_module.graph.nodes: + # TODO(wschin): use a better way to identify fused submodule + # See https://github.com/pytorch/pytorch/issues/106872. + if node.op == "call_module" and "fused_" in node.name: + fused_module = getattr(partitioned_prim_graph_module, node.name) + # self.ort_acclerated_call is responsible for exporting graph to ONNX, + # creating ORT session, and running ORT session. + fused_module._wrapped_call = self._ort_acclerated_call + + return partitioned_prim_graph_module + + def __call__( + self, graph_module: torch.fx.GraphModule, args + ) -> torch.fx.GraphModule: + """If ``OrtBackendOptions.use_aot_autograd`` is ``True``, the `auto_autograd` compiler + will be invoked, wrapping this ``OrtBackend`` instance's ``compile`` method. Otherwise, + the ``compile`` method is invoked directly.""" + if self._options.use_aot_autograd: + from functorch.compile import min_cut_rematerialization_partition + + from torch._dynamo.backends.common import aot_autograd + + return aot_autograd( + fw_compiler=self.compile, + partition_fn=min_cut_rematerialization_partition, + decompositions=self._resolved_onnx_exporter_options.decomposition_table, + )(graph_module, args) + + return self.compile(graph_module, args) + + __instance_cache_max_count: Final = 8 + __instance_cache: Final[List["OrtBackend"]] = [] + + @staticmethod + def get_cached_instance_for_options( + options: Optional[Union[OrtBackendOptions, Mapping[str, Any]]] = None, + ) -> "OrtBackend": + """Returns a possibly cached instance of an ``OrtBackend``. If an existing + backend was created previously through this function with the same options, + it will be returned. Otherwise a new backend will be created, cached, and + returned. + + Note: if ``options`` sets ``ort_session_options``, a new ``OrtBackend`` + will always be returned, since ``onnxruntime.SessionOptions`` cannot + participate in caching.""" + + def reusable(a: OrtBackendOptions, b: OrtBackendOptions): + if ( + a.preferred_execution_providers != b.preferred_execution_providers + or a.infer_execution_providers != b.infer_execution_providers + or a.default_execution_providers != b.default_execution_providers + or a.preallocate_output != b.preallocate_output + or a.use_aot_autograd != b.use_aot_autograd + ): + return False + + # onnxruntime.SessionOptions is a pybind11 object, cannot be pickled, + # and holds too much potential state to reasonably check manually; + # ort_session_options is provided at all, the backend does not participate + # in caching. + if a.ort_session_options is not None or b.ort_session_options is not None: + return False + + if a.export_options is b.export_options: + return True + + # Similarly, some objects in ExportOptions are too stateful to use for + # caching. We should revisit this. 
+ if a.export_options is not None and b.export_options is not None: + return ( + a.export_options.dynamic_shapes == b.export_options.dynamic_shapes + and a.export_options.op_level_debug + == b.export_options.op_level_debug + and a.export_options.diagnostic_options + == b.export_options.diagnostic_options + and a.export_options.onnx_registry is b.export_options.onnx_registry + and a.export_options.fake_context is b.export_options.fake_context + ) + + # We can't account for how the two option sets may differ, so it's not safe to reuse. + return False + + if not isinstance(options, OrtBackendOptions): + options = OrtBackendOptions(**(options or {})) + + backend = next( + (b for b in OrtBackend.__instance_cache if reusable(b._options, options)), + None, + ) + + if backend is None: + assert ( + len(OrtBackend.__instance_cache) < OrtBackend.__instance_cache_max_count + ), ( + f"No more than {OrtBackend.__instance_cache_max_count} instances of " + f"{OrtBackend} allowed. Please instantiate `{OrtBackend}` explicitly " + "to pass to `torch.compile`. " + "See https://github.com/pytorch/pytorch/pull/107973#discussion_r1306144795 " + "for discussion." + ) + OrtBackend.__instance_cache.append(backend := OrtBackend(options)) + + return backend + + @staticmethod + def clear_cached_instances(): + OrtBackend.__instance_cache.clear() + + @staticmethod + def get_cached_instances(): + return tuple(OrtBackend.__instance_cache) + + +@compatibility(is_backward_compatible=False) +def torch_compile_backend( + graph_module: torch.fx.GraphModule, + args, + *, + options: Optional[Union[OrtBackendOptions, Mapping[str, Any]]] = None, +): + return OrtBackend.get_cached_instance_for_options(options)(graph_module, args) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/registration.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/registration.py new file mode 100644 index 0000000000000000000000000000000000000000..017a2fb7dadfb2146ca629373b9a9d3506ad3b71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/registration.py @@ -0,0 +1,339 @@ +"""Module for handling symbolic function registration.""" + +import warnings +from typing import ( + Callable, + Collection, + Dict, + Generic, + Optional, + Sequence, + Set, + TypeVar, + Union, +) + +from torch.onnx import _constants, errors +from torch.onnx._internal import _beartype + +OpsetVersion = int + + +def _dispatch_opset_version( + target: OpsetVersion, registered_opsets: Collection[OpsetVersion] +) -> Optional[OpsetVersion]: + """Finds the registered opset given a target opset version and the available opsets. + + Args: + target: The target opset version. + registered_opsets: The available opsets. + + Returns: + The registered opset version. + """ + if not registered_opsets: + return None + + descending_registered_versions = sorted(registered_opsets, reverse=True) + # Linear search for the opset version, which is fine since the number of opset + # versions is small. + + if target >= _constants.ONNX_BASE_OPSET: + # Always look down toward opset 1 when the target is >= ONNX_BASE_OPSET (opset 9). + # When a custom op is register at opset 1, we want to be able to discover it as a + # fallback for all opsets >= ONNX_BASE_OPSET. + for version in descending_registered_versions: + if version <= target: + return version + return None + + # target < opset 9. This is the legacy behavior to support opset 7 and opset 8. + # for caffe2 support. We search up toward opset 9. 
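+    # For example, with target=8 and registered opsets {7, 9, 13}, the loop
+    # below visits 7, 9, 13 in ascending order and returns 9, the first
+    # version v satisfying target <= v <= ONNX_BASE_OPSET.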
+ for version in reversed(descending_registered_versions): + # Count back up until _constants.ONNX_BASE_OPSET + if target <= version <= _constants.ONNX_BASE_OPSET: + return version + + return None + + +_K = TypeVar("_K") +_V = TypeVar("_V") + + +class OverrideDict(Generic[_K, _V], Collection[_K]): + """A dictionary that merges built-in and custom symbolic functions. + + It supports overriding and un-overriding built-in symbolic functions with custom + ones. + """ + + def __init__(self): + self._base: Dict[_K, _V] = {} + self._overrides: Dict[_K, _V] = {} + self._merged: Dict[_K, _V] = {} + + def set_base(self, key: _K, value: _V) -> None: + self._base[key] = value + if key not in self._overrides: + self._merged[key] = value + + def in_base(self, key: _K) -> bool: + """Checks if a key is in the base dictionary.""" + return key in self._base + + def override(self, key: _K, value: _V) -> None: + """Overrides a base key-value with a new pair.""" + self._overrides[key] = value + self._merged[key] = value + + def remove_override(self, key: _K) -> None: + """Un-overrides a key-value pair.""" + self._overrides.pop(key, None) # type: ignore[arg-type] + self._merged.pop(key, None) # type: ignore[arg-type] + if key in self._base: + self._merged[key] = self._base[key] + + def overridden(self, key: _K) -> bool: + """Checks if a key-value pair is overridden.""" + return key in self._overrides + + def __getitem__(self, key: _K) -> _V: + return self._merged[key] + + def get(self, key: _K, default: Optional[_V] = None): + return self._merged.get(key, default) + + def __contains__(self, key: object) -> bool: + return key in self._merged + + def __iter__(self): + return iter(self._merged) + + def __len__(self) -> int: + return len(self._merged) + + def __repr__(self) -> str: + return f"OverrideDict(base={self._base}, overrides={self._overrides})" + + def __bool__(self) -> bool: + return bool(self._merged) + + +class _SymbolicFunctionGroup: + """Different versions of symbolic functions registered to the same name. + + O(number of registered versions of an op) search is performed to find the most + recent version of the op. + + The registration is delayed until op is used to improve startup time. + + Function overloads with different arguments are not allowed. + Custom op overrides are supported. + """ + + def __init__(self, name: str) -> None: + self._name = name + # A dictionary of functions, keyed by the opset version. + self._functions: OverrideDict[OpsetVersion, Callable] = OverrideDict() + + def __repr__(self) -> str: + return f"_SymbolicFunctionGroup({self._name}, registered={self._functions})" + + def __getitem__(self, key: OpsetVersion) -> Callable: + result = self.get(key) + if result is None: + raise KeyError(key) + return result + + # TODO(justinchuby): Add @functools.lru_cache(maxsize=None) if lookup time becomes + # a problem. + def get(self, opset: OpsetVersion) -> Optional[Callable]: + """Find the most recent version of the function.""" + version = _dispatch_opset_version(opset, self._functions) + if version is None: + return None + + return self._functions[version] + + def add(self, func: Callable, opset: OpsetVersion) -> None: + """Adds a symbolic function. + + Args: + func: The function to add. + opset: The opset version of the function to add. + """ + if self._functions.in_base(opset): + warnings.warn( + f"Symbolic function '{self._name}' already registered for opset {opset}. " + f"Replacing the existing function with new function. This is unexpected. 
" + f"Please report it on {_constants.PYTORCH_GITHUB_ISSUES_URL}.", + errors.OnnxExporterWarning, + ) + self._functions.set_base(opset, func) + + def add_custom(self, func: Callable, opset: OpsetVersion) -> None: + """Adds a custom symbolic function. + + Args: + func: The symbolic function to register. + opset: The corresponding opset version. + """ + self._functions.override(opset, func) + + def remove_custom(self, opset: OpsetVersion) -> None: + """Removes a custom symbolic function. + + Args: + opset: The opset version of the custom function to remove. + """ + if not self._functions.overridden(opset): + warnings.warn( + f"No custom function registered for '{self._name}' opset {opset}" + ) + return + self._functions.remove_override(opset) + + def get_min_supported(self) -> OpsetVersion: + """Returns the lowest built-in opset version supported by the function.""" + return min(self._functions) + + +class SymbolicRegistry: + """Registry for symbolic functions. + + The registry maintains a mapping from qualified names to symbolic functions. + It is used to register new symbolic functions and to dispatch calls to + the appropriate function. + """ + + def __init__(self) -> None: + self._registry: Dict[str, _SymbolicFunctionGroup] = {} + + def register( + self, name: str, opset: OpsetVersion, func: Callable, custom: bool = False + ) -> None: + """Registers a symbolic function. + + Args: + name: The qualified name of the function to register. In the form of 'domain::op'. + E.g. 'aten::add'. + opset: The opset version of the function to register. + func: The symbolic function to register. + custom: Whether the function is a custom function that overrides existing ones. + + Raises: + ValueError: If the separator '::' is not in the name. + """ + if "::" not in name: + raise ValueError( + f"The name must be in the form of 'domain::op', not '{name}'" + ) + symbolic_functions = self._registry.setdefault( + name, _SymbolicFunctionGroup(name) + ) + if custom: + symbolic_functions.add_custom(func, opset) + else: + symbolic_functions.add(func, opset) + + def unregister(self, name: str, opset: OpsetVersion) -> None: + """Unregisters a symbolic function. + + Args: + name: The qualified name of the function to unregister. + opset: The opset version of the function to unregister. + """ + if name not in self._registry: + return + self._registry[name].remove_custom(opset) + + def get_function_group(self, name: str) -> Optional[_SymbolicFunctionGroup]: + """Returns the function group for the given name.""" + return self._registry.get(name) + + def is_registered_op(self, name: str, version: int) -> bool: + """Returns whether the given op is registered for the given opset version.""" + functions = self.get_function_group(name) + if functions is None: + return False + return functions.get(version) is not None + + def all_functions(self) -> Set[str]: + """Returns the set of all registered function names.""" + return set(self._registry) + + +@_beartype.beartype +def onnx_symbolic( + name: str, + opset: Union[OpsetVersion, Sequence[OpsetVersion]], + decorate: Optional[Sequence[Callable]] = None, + custom: bool = False, +) -> Callable: + """Registers a symbolic function. + + Usage:: + + ``` + @onnx_symbolic("aten::symbolic_b", opset=10, decorate=[quantized_aten_handler(scale=1/128, zero_point=0)]) + @symbolic_helper.parse_args("v", "v", "b") + def symbolic_b(g: _C.Graph, x: _C.Value, y: _C.Value, arg1: bool) -> _C.Value: + ... + ``` + + Args: + name: The qualified name of the function in the form of 'domain::op'. 
+ E.g. 'aten::add'. + opset: The opset versions of the function to register at. + decorate: A sequence of decorators to apply to the function. + custom: Whether the function is a custom symbolic function. + + Raises: + ValueError: If the separator '::' is not in the name. + """ + + def wrapper(func: Callable) -> Callable: + decorated = func + if decorate is not None: + for decorate_func in decorate: + decorated = decorate_func(decorated) + + global registry + nonlocal opset + if isinstance(opset, OpsetVersion): + opset = (opset,) + for opset_version in opset: + registry.register(name, opset_version, decorated, custom=custom) + + # Return the original function because the decorators in "decorate" are only + # specific to the instance being registered. + return func + + return wrapper + + +@_beartype.beartype +def custom_onnx_symbolic( + name: str, + opset: Union[OpsetVersion, Sequence[OpsetVersion]], + decorate: Optional[Sequence[Callable]] = None, +) -> Callable: + """Registers a custom symbolic function. + + Args: + name: the qualified name of the function. + opset: the opset version of the function. + decorate: a sequence of decorators to apply to the function. + + Returns: + The decorator. + + Raises: + ValueError: If the separator '::' is not in the name. + """ + return onnx_symbolic(name, opset, decorate, custom=True) + + +# The registry for all symbolic functions. +registry = SymbolicRegistry() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/_onnx_supported_ops.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_onnx_supported_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2611b0d81e9b300259303bb5b6c20c50e88243c1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/_onnx_supported_ops.py @@ -0,0 +1,97 @@ +import inspect +from typing import Dict, List, Union + +from torch import _C +from torch.onnx import _constants +from torch.onnx._internal import registration + + +class _TorchSchema: + def __init__(self, schema: Union[_C.FunctionSchema, str]) -> None: + if isinstance(schema, _C.FunctionSchema): + self.name: str = schema.name + self.overload_name: str = schema.overload_name + self.arguments: List[str] = [arg.name for arg in schema.arguments] + self.optional_arguments: List[str] = [] + self.returns: List[str] = [ret.name for ret in schema.returns] + self.opsets: List[int] = [] + else: + self.name = schema + self.overload_name = "" + self.arguments = [] + self.optional_arguments = [] + self.returns = [] + self.opsets = [] + + def __str__(self) -> str: + s = ( + f"{self.name}.{self.overload_name}(" + + ", ".join(self.arguments) + + ") -> (" + + ", ".join(self.returns) + + ")" + + " in opsets " + + ", ".join(str(opset) for opset in self.opsets) + ) + return s + + def __hash__(self): + # TODO(thiagocrepaldi): handle overload_name? + return hash(self.name) + + def __eq__(self, other) -> bool: + if not isinstance(other, _TorchSchema): + return False + # TODO(thiagocrepaldi): handle overload_name? 
+ return self.name == other.name + + def is_aten(self) -> bool: + return self.name.startswith("aten::") + + def is_backward(self) -> bool: + return "backward" in self.name + + +def _symbolic_argument_count(func): + params = [] + signature = inspect.signature(func) + optional_params = [] + for name, parameter in signature.parameters.items(): + if name in {"_outputs", "g"}: + continue + if parameter.default is parameter.empty: + optional_params.append(parameter) + else: + params.append(str(parameter)) + return params + + +def all_forward_schemas() -> Dict[str, _TorchSchema]: + """Returns schemas for all TorchScript forward ops.""" + torch_schemas = [_TorchSchema(s) for s in _C._jit_get_all_schemas()] + return {schema.name: schema for schema in torch_schemas if not schema.is_backward()} + + +def all_symbolics_schemas() -> Dict[str, _TorchSchema]: + """Returns schemas for all onnx supported ops.""" + symbolics_schemas = {} + + for name in registration.registry.all_functions(): + func_group = registration.registry.get_function_group(name) + assert func_group is not None + symbolics_schema = _TorchSchema(name) + func = func_group.get(_constants.ONNX_MAX_OPSET) + if func is not None: + symbolics_schema.arguments = _symbolic_argument_count(func) + symbolics_schema.opsets = list( + range(func_group.get_min_supported(), _constants.ONNX_MAX_OPSET + 1) + ) + else: + # Only support opset < 9 + func = func_group.get(7) + symbolics_schema.arguments = _symbolic_argument_count(func) + symbolics_schema.opsets = list(range(7, _constants.ONNX_BASE_OPSET)) + + symbolics_schemas[name] = symbolics_schema + + return symbolics_schemas diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/errors.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad8bab4122edd6fcd35f09767322c46aa7739bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/errors.py @@ -0,0 +1,106 @@ +"""ONNX exporter exceptions.""" +from __future__ import annotations + +import textwrap +from typing import Optional + +from torch import _C +from torch.onnx import _constants +from torch.onnx._internal import diagnostics + +__all__ = [ + "OnnxExporterError", + "OnnxExporterWarning", + "CheckerError", + "SymbolicValueError", + "UnsupportedOperatorError", +] + + +class OnnxExporterWarning(UserWarning): + """Base class for all warnings in the ONNX exporter.""" + + pass + + +class OnnxExporterError(RuntimeError): + """Errors raised by the ONNX exporter.""" + + pass + + +class CheckerError(OnnxExporterError): + """Raised when ONNX checker detects an invalid model.""" + + pass + + +class UnsupportedOperatorError(OnnxExporterError): + """Raised when an operator is unsupported by the exporter.""" + + def __init__(self, name: str, version: int, supported_version: Optional[int]): + if supported_version is not None: + diagnostic_rule: diagnostics.infra.Rule = ( + diagnostics.rules.operator_supported_in_newer_opset_version + ) + msg = diagnostic_rule.format_message(name, version, supported_version) + diagnostics.diagnose(diagnostic_rule, diagnostics.levels.ERROR, msg) + else: + if name.startswith(("aten::", "prim::", "quantized::")): + diagnostic_rule = diagnostics.rules.missing_standard_symbolic_function + msg = diagnostic_rule.format_message( + name, version, _constants.PYTORCH_GITHUB_ISSUES_URL + ) + diagnostics.diagnose(diagnostic_rule, diagnostics.levels.ERROR, msg) + else: + diagnostic_rule = diagnostics.rules.missing_custom_symbolic_function + 
msg = diagnostic_rule.format_message(name) + diagnostics.diagnose(diagnostic_rule, diagnostics.levels.ERROR, msg) + super().__init__(msg) + + +class SymbolicValueError(OnnxExporterError): + """Errors around TorchScript values and nodes.""" + + def __init__(self, msg: str, value: _C.Value): + message = ( + f"{msg} [Caused by the value '{value}' (type '{value.type()}') in the " + f"TorchScript graph. The containing node has kind '{value.node().kind()}'.] " + ) + + code_location = value.node().sourceRange() + if code_location: + message += f"\n (node defined in {code_location})" + + try: + # Add its input and output to the message. + message += "\n\n" + message += textwrap.indent( + ( + "Inputs:\n" + + ( + "\n".join( + f" #{i}: {input_} (type '{input_.type()}')" + for i, input_ in enumerate(value.node().inputs()) + ) + or " Empty" + ) + + "\n" + + "Outputs:\n" + + ( + "\n".join( + f" #{i}: {output} (type '{output.type()}')" + for i, output in enumerate(value.node().outputs()) + ) + or " Empty" + ) + ), + " ", + ) + except AttributeError: + message += ( + " Failed to obtain its input and output for debugging. " + "Please refer to the TorchScript graph for debugging information." + ) + + super().__init__(message) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/operators.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/operators.py new file mode 100644 index 0000000000000000000000000000000000000000..e5f12444c35593b41d8c877312fe0f41c88267ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/operators.py @@ -0,0 +1,20 @@ +r"""This file provides a location for operators that help exporting models via onnx. + +E.g. `shape_as_tensor` and `reshape_from_tensor_shape` +are to make all dynamic sizes operations traceable. + +NOTE: at one point these functions were implemented differently. +Since then we have implemented these directly in ATen, so this +file is kept purely for backward-compatibility. 
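+
+Example (a minimal sketch of the thin wrappers below)::
+
+    >>> import torch
+    >>> from torch.onnx.operators import shape_as_tensor
+    >>> shape_as_tensor(torch.zeros(2, 3))
+    tensor([2, 3])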
+""" + +import torch +import torch.onnx + + +def shape_as_tensor(x): + return torch._shape_as_tensor(x) + + +def reshape_from_tensor_shape(x, shape): + return torch._reshape_from_tensor(x, shape) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_caffe2.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_caffe2.py new file mode 100644 index 0000000000000000000000000000000000000000..3398fcd2fe10acf6402ed50448e3252a739cbac6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_caffe2.py @@ -0,0 +1,359 @@ +import importlib +import inspect + +from torch.onnx import symbolic_helper, symbolic_opset9 as opset9 +from torch.onnx._internal import jit_utils, registration + + +def register_quantized_ops(domain: str, version: int): + # Register all quantized ops + module = importlib.import_module("torch.onnx.symbolic_caffe2") + quant_version_ops = inspect.getmembers(module) + aten_q_ops = { + "relu", + "_empty_affine_quantized", + "dequantize", + "quantize_per_tensor", + "upsample_nearest2d", + "avg_pool2d", + "reshape", + "slice", + "cat", + "max_pool2d", + "sigmoid", + } + for op, func in quant_version_ops: + name = f"{domain}::{op}" + if inspect.isfunction(func) and not registration.registry.is_registered_op( + name, version + ): + if op in aten_q_ops: + # Override the builtin aten ops + registration.registry.register( + f"aten::{op}", version, func, custom=True + ) + registration.registry.register(name, version, func) + + +def _permute_helper(g: jit_utils.GraphContext, input, axes): + quant_args = { + "axes_i": axes, + "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"), + "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"), + } + output = g.op("_caffe2::Int8Transpose", input, **quant_args) + symbolic_helper._quantized_ops.add(output) + return output + + +def nchw2nhwc(g: jit_utils.GraphContext, input): + axes = [0, 2, 3, 1] + return _permute_helper(g, input, axes) + + +def nhwc2nchw(g: jit_utils.GraphContext, input): + axes = [0, 3, 1, 2] + return _permute_helper(g, input, axes) + + +def linear_prepack(g: jit_utils.GraphContext, weight, bias): + # Mapping to a dummy caffe2 prepack node. + # During the onnx -> c2 conversion we can look up original weight and bias + # from this node + output = g.op("_caffe2::WeightPrepack", weight, bias) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "v", "v", "f", "i") +def linear(g: jit_utils.GraphContext, input, weight, bias, scale, zero_point): + kwargs = { + "Y_scale_f": scale, + "Y_zero_point_i": zero_point, + } + output = g.op("_caffe2::Int8FC", input, weight, bias, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +def conv_prepack( + g: jit_utils.GraphContext, input, weight, bias, stride, padding, dilation, groups +): + # Mapping to a dummy caffe2 prepack node. 
+ # During the onnx -> c2 conversion we can look up original weight and bias + # from this node + output = g.op("_caffe2::WeightPrepack", input, weight, bias) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "f", "i") +def conv2d( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + groups, + scale, + zero_point, +): + kernel_size = weight.node()["shape"][1:3] + kwargs = { + "strides_i": stride, + "pads_i": padding + padding, + "dilations_i": dilation, + "group_i": groups, + "kernels_i": kernel_size, + "order_s": "NHWC", + "Y_scale_f": scale, + "Y_zero_point_i": zero_point, + } + output = g.op("_caffe2::Int8Conv", input, weight, bias, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "f", "i") +def conv2d_relu( + g: jit_utils.GraphContext, + input, + weight, + bias, + stride, + padding, + dilation, + groups, + scale, + zero_point, +): + kernel_size = weight.node()["shape"][1:3] + kwargs = { + "strides_i": stride, + "pads_i": padding + padding, + "dilations_i": dilation, + "group_i": groups, + "kernels_i": kernel_size, + "order_s": "NHWC", + "Y_scale_f": scale, + "Y_zero_point_i": zero_point, + } + output = g.op("_caffe2::Int8ConvRelu", input, weight, bias, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "v", "f", "i") +def add(g: jit_utils.GraphContext, input_a, input_b, scale, zero_point): + kwargs = { + "Y_scale_f": scale, + "Y_zero_point_i": zero_point, + } + output = g.op("_caffe2::Int8Add", input_a, input_b, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v") +def relu(g: jit_utils.GraphContext, input): + if input not in symbolic_helper._quantized_ops: + return opset9.relu(g, input) + kwargs = { + "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"), + "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"), + } + output = g.op("_caffe2::Int8Relu", input, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "f", "i", "t") +def quantize_per_tensor(g: jit_utils.GraphContext, input, scale, zero_point, dtype): + kwargs = { + "Y_scale_f": scale, + "Y_zero_point_i": zero_point, + } + output = g.op("_caffe2::Int8Quantize", input, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v") +def dequantize(g: jit_utils.GraphContext, input): + return g.op("_caffe2::Int8Dequantize", input) + + +@symbolic_helper.parse_args("v", "t", "t", "t", "t", "t", "t", "t") +def _empty_affine_quantized( + g: jit_utils.GraphContext, + input, + shape, + scale, + zero_point, + dtype, + pin_memory, + memory_format, + layout, +): + return input + + +def upsample_nearest2d( + g: jit_utils.GraphContext, + input, + output_size, + align_corners=None, + scales_h=None, + scales_w=None, +): + if input not in symbolic_helper._quantized_ops: + return opset9.upsample_nearest2d(g, input, output_size, align_corners) # type: ignore[attr-defined] + + output_size = symbolic_helper._parse_arg(output_size, "is") + kwargs = { + "output_size_i": output_size, + "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"), + "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"), + } + input = nchw2nhwc(g, input) + output = 
g.op("_caffe2::Int8ResizeNearest", input, **kwargs) + output = nhwc2nchw(g, output) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "is", "is", "is", "is", "i") +def max_pool2d( + g: jit_utils.GraphContext, + input, + kernel_size, + stride, + padding, + dilation, + ceil_mode, +): + if input not in symbolic_helper._quantized_ops: + return opset9.max_pool2d( # type: ignore[attr-defined] + g, input, kernel_size, stride, padding, dilation, ceil_mode + ) + kwargs = { + "strides_i": stride, + "pads_i": padding + padding, + "kernel_i": kernel_size[0], + "order_s": "NHWC", + "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"), + "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"), + } + input = nchw2nhwc(g, input) + output = g.op("_caffe2::Int8MaxPool", input, **kwargs) + output = nhwc2nchw(g, output) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none") +def avg_pool2d( + g: jit_utils.GraphContext, + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override=None, +): + if input not in symbolic_helper._quantized_ops: + return opset9.avg_pool2d( # type: ignore[attr-defined] + g, + input, + kernel_size, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override, + ) + kwargs = { + "strides_i": stride, + "pads_i": padding + padding, + "kernel_i": kernel_size[0], + "order_s": "NHWC", + "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"), + "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"), + } + input = nchw2nhwc(g, input) + output = g.op("_caffe2::Int8AveragePool", input, **kwargs) + output = nhwc2nchw(g, output) + symbolic_helper._quantized_ops.add(output) + return output + + +def reshape(g: jit_utils.GraphContext, input, shape): + if input not in symbolic_helper._quantized_ops: + return opset9.reshape(g, input, shape) + + kwargs = { + "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"), + "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"), + } + output = g.op("_caffe2::Int8Reshape", input, shape, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v", "v", "v", "v", "i") +def slice(g: jit_utils.GraphContext, input, dim, start, end, step): + if input not in symbolic_helper._quantized_ops: + return opset9.slice(g, input, dim, start, end, step) + + if step != 1: + raise RuntimeError("ONNX quantized slice export only works for step 1.") + start = symbolic_helper._parse_arg(start, "i") + end = symbolic_helper._parse_arg(end, "i") + dim = symbolic_helper._parse_arg(dim, "i") + + kwargs = { + "start_idx_i": start, + "end_idx_i": end, + "dim_i": dim, + "Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"), + "Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"), + } + output = g.op("_caffe2::Int8Slice", input, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +def cat(g: jit_utils.GraphContext, tensor_list, dim, scale=None, zero_point=None): + tensors = symbolic_helper._unpack_list(tensor_list) + input = tensors[0] + if input not in symbolic_helper._quantized_ops: + return opset9.cat(g, tensor_list, dim) + + dim = symbolic_helper._parse_arg(dim, "i") + kwargs = { + "Y_scale_f": tensors[0].node()["Y_scale"], + "Y_zero_point_i": tensors[0].node()["Y_zero_point"], + } + output = g.op("_caffe2::Int8Concat", *tensors, 
axis_i=dim, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output + + +@symbolic_helper.parse_args("v") +def sigmoid(g: jit_utils.GraphContext, input): + if input not in symbolic_helper._quantized_ops: + return opset9.sigmoid(g, input) + # Caffe2 expects the output scale to be 1/2^8 + # and output zero_point to be 0 (quint8 type) + out_scale = 1.0 / 256 + zero_point = 0 + kwargs = { + "Y_scale_f": out_scale, + "Y_zero_point_i": zero_point, + } + output = g.op("_caffe2::Int8Sigmoid", input, **kwargs) + symbolic_helper._quantized_ops.add(output) + return output diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_helper.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b55c7dec99acb82280016468f2285126fde46c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_helper.py @@ -0,0 +1,1823 @@ +from __future__ import annotations + +import functools +import inspect +import sys +import typing +import warnings +from typing import ( + Any, + Callable, + List, + Literal, + NoReturn, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import torch +import torch._C._onnx as _C_onnx +from torch import _C + +# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics +from torch.onnx import _constants, _type_utils, errors +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils +from torch.types import Number + +__all__ = [ + "args_have_same_dtype", + "cast_pytorch_to_onnx", + "check_training_mode", + "dequantize_helper", + "is_caffe2_aten_fallback", + "is_complex_value", + "parse_args", + "pytorch_name_to_type", + "quantize_helper", + "quantized_args", + "requantize_bias_helper", + "scalar_name_to_pytorch", + "scalar_type_to_onnx", + "scalar_type_to_pytorch_type", +] + +# --------------------------------------------------------------------------------- +# Helper functions +# --------------------------------------------------------------------------------- + +_ValueDescriptor = Literal[ + "v", + "i", + "is", + "f", + "fs", + "b", + "s", + "t", + "none", +] + + +@_beartype.beartype +def _parse_arg( + value, + desc: _ValueDescriptor, + arg_name: Optional[str] = None, + node_name: Optional[str] = None, +): + if desc == "none": + return value + if desc == "v" or not _is_value(value): + return value + + node = value.node() + if node.mustBeNone(): + return None + if node.kind() == "onnx::Constant": + node_val = _node_get(node, "value") + if desc == "i": + return int(node_val) + elif desc == "f": + return float(node_val) + elif desc == "b": + return bool(node_val) + elif desc == "s": + return str(node_val) + elif desc == "t": + return node_val + elif desc == "is": + return [int(v) for v in node_val] + elif desc == "fs": + return [float(v) for v in node_val] + else: + raise errors.SymbolicValueError( + f"ONNX symbolic does not understand the Constant node '{node}' " + f"specified with descriptor '{desc}'.", + value, + ) + elif node.kind() == "prim::ListConstruct": + if desc == "is": + for v in node.inputs(): + element_node = v.node() + if element_node.kind() != "onnx::Constant": + raise errors.SymbolicValueError( + f"Failed to export a node '{element_node}' " + f"(in list node {node}) " + f"because it is not constant. " + f"Please try to make things (e.g. 
kernel sizes) static if possible.", + value, + ) + return [int(_node_get(v.node(), "value")) for v in value.node().inputs()] + else: + raise errors.SymbolicValueError( + f"ONNX symbolic does not know how to unpack the ListConstruct node that " + f"is not a list of integers: '{node}'", + value, + ) + + if arg_name is None or node_name is None: + raise errors.SymbolicValueError( + f"Expected node type 'onnx::Constant', got '{node.kind()}'.", + value, + ) + + raise errors.SymbolicValueError( + "Expected node type 'onnx::Constant' " + f"for argument '{arg_name}' of node '{node_name}', got '{node.kind()}'.", + value, + ) + + +@_beartype.beartype +def _node_get(node: _C.Node, key: str): + """Gets attributes of a node which is polymorphic over return type.""" + assert isinstance(node, _C.Node) + sel = node.kindOf(key) + return getattr(node, sel)(key) + + +@_beartype.beartype +def _is_onnx_constant(value: _C.Value): + """Whether a Value is an ONNX constant.""" + return value.node().kind() == "onnx::Constant" + + +@_beartype.beartype +def _maybe_get_const( + value: Optional[Union[_C.Value, torch.Tensor, Number, Sequence]], + descriptor: _ValueDescriptor, +): + # NOTE: prim::Constant at this stage usually means something not compatible in ONNX, + # otherwise it'd be converted to onnx::Constant + # TODO(justinchuby): Replace insinstance with _is_value once we figure out mypy + if isinstance(value, _C.Value) and _is_onnx_constant(value): + return _parse_arg(value, descriptor) + return value + + +@_beartype.beartype +def _maybe_get_scalar(value): + value_t = _maybe_get_const(value, "t") + if isinstance(value_t, torch.Tensor) and value_t.shape == (): + return value_t + return value + + +@_beartype.beartype +def _get_const(value, desc, arg_name): + if not _is_constant(value): + raise errors.SymbolicValueError( + f"ONNX symbolic expected a constant value of the '{arg_name}' argument, " + f"got '{value}'", + value, + ) + return _parse_arg(value, desc) + + +@_beartype.beartype +def _unpack_list(list_value: _C.Value) -> List[_C.Value]: + list_node = list_value.node() + if list_node.kind() != "prim::ListConstruct": + raise errors.SymbolicValueError( + f"ONNX symbolic expected node type prim::ListConstruct, " + f"got '{list_node}'.", + list_value, + ) + return list(list_node.inputs()) + + +@_beartype.beartype +def _unpack_tuple(tuple_value: _C.Value) -> Tuple[_C.Value, ...]: + tuple_node = tuple_value.node() + if not _is_tuple_construct(tuple_value): + raise errors.SymbolicValueError( + f"ONNX symbolic expected node type 'prim::TupleConstruct', " + f"got '{tuple_node.kind()}'.", + tuple_value, + ) + return tuple(tuple_node.inputs()) + + +@_beartype.beartype +def _unpack_quantized_tensor(tuple_value: _C.Value) -> Tuple[_C.Value, ...]: + """Unpacks a quantized tensor into a tuple of tensor and scale/zero_point. + Args: + tuple_value: A tuple of tensor, scale, zero_point, and optionally axis. + Returns: + A tuple of tensor, scale, zero_point, and optionally axis. + """ + tuple_node = tuple_value.node() + # A quantized tensor is represented as tuple of the form (tensor, scale, zero_point, ) + if not _is_tuple_construct(tuple_value): + raise errors.SymbolicValueError( + f"ONNX symbolic expected the output of `{tuple_node}` to be a quantized " + f"tensor. Is this likely due to missing support for quantized " + f"`{tuple_node.kind()}`. 
Please create an issue on {_constants.PYTORCH_GITHUB_ISSUES_URL}", + tuple_value, + ) + unpacked = tuple(tuple_node.inputs()) + assert len(unpacked) == 3 or len(unpacked) == 4 + return unpacked + + +# Check if list_value is output from prim::ListConstruct +# This is usually called before _unpack_list to ensure the list can be unpacked. +@_beartype.beartype +def _is_packed_list(list_value: Any) -> bool: + return _is_value(list_value) and list_value.node().kind() == "prim::ListConstruct" + + +@_beartype.beartype +def parse_args(*arg_descriptors: _ValueDescriptor): + """A decorator which converts args from torch._C.Value to built-in types. + + For example: + + ``` + @parse_args('v', 'i', 'fs') + foo(g, a, b, c): + assert isinstance(a, torch._C.Value) + assert isinstance(b, int) + assert isinstance(c, list) + assert isinstance(c[0], float) + ``` + + Args: + arg_descriptors: list of str, where each element is + a string that specifies the type to convert to. Valid descriptors: + "v": no conversion, keep torch._C.Value. + "i": int + "is": list of int + "f": float + "fs": list of float + "b": bool + "s": str + "t": torch.Tensor + "none": the variable is unused + """ + + def decorator(fn): + fn._arg_descriptors = arg_descriptors + + @functools.wraps(fn) + def wrapper(g, *args, **kwargs): + # some args may be optional, so the length may be smaller + FILE_BUG_MSG = ( + "If you believe this is not due to custom symbolic implementation within your code or " + "an external library, please file an issue at " + "https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml to report this bug." + ) + assert len(arg_descriptors) >= len(args), ( + f"A mismatch between the number of arguments ({len(args)}) and " + f"their descriptors ({len(arg_descriptors)}) was found at symbolic function '{fn.__name__}'. " + f"{FILE_BUG_MSG}" + ) + + try: + sig = inspect.signature(fn) + arg_names = list(sig.parameters.keys())[1:] + fn_name = fn.__name__ + except Exception: + # FIXME(justinchuby): Avoid catching Exception. + # Catch a more specific exception instead. + arg_names = [None] * len(args) # type: ignore[list-item] + fn_name = None + args = [ + _parse_arg(arg, arg_desc, arg_name, fn_name) # type: ignore[assignment] + for arg, arg_desc, arg_name in zip(args, arg_descriptors, arg_names) + ] + # only support _outputs in kwargs + assert len(kwargs) <= 1, ( + f"Symbolic function {fn.__name__}'s '**kwargs' can contain a single " + f"key/value entry. " + f"{FILE_BUG_MSG}" + ) + + if len(kwargs) == 1: + assert "_outputs" in kwargs, ( + f"Symbolic function {fn.__name__}'s '**kwargs' can only contain " + f"'_outputs' key at '**kwargs'. " + f"{FILE_BUG_MSG}" + ) + return fn(g, *args, **kwargs) + + return wrapper + + return decorator + + +@_beartype.beartype +def quantized_args( + *arg_q_descriptors: bool, + scale: Optional[float] = None, + zero_point: Optional[int] = None, + quantize_output: bool = True, +): + """A decorator which extends support for quantized version of the base operator. + + Quantization is detected by examining the arguments that are annotated by + `arg_q_descriptors`. + + If quantization is detected, the base operator symbolic function will be wrapped with + argument de-quantization and output quantization. + + Otherwise, only the base symbolic function will be invoked. 
+ + For example: + + ``` + @quantized_args(True, False) + def foo(g, x, y): + return x + y + ``` + + is equivalent to + + ``` + def q_foo(g, x, y): + if is_quantized_tensor(x): + x = dequantize(x) + out = foo(g, x, y) + return quantize(out) + else: + return foo(g, x, y) + ``` + + Args: + arg_q_descriptors: A sequence of bool, where each element represents if the + argument is QTensor for quantized version of this operator. It defaults + to False for unspecified (variable length) arguments. + scale: Quantized output scale. If None, derive from + the first quantized input scale. + zero_point: Quantized output zero point. If None, + derive from the first quantized input zero point. + quantize_output: If True, quantize the output of the base operator. Default is True + """ + + def decorator(fn): + @functools.wraps(fn) + def wrapper(g, *args, **kwargs): + nonlocal scale + nonlocal zero_point + if scale is not None: + _scale = g.op("Constant", value_t=torch.tensor(scale)) + else: + _scale = None + if zero_point is not None: + _zero_point = g.op("Constant", value_t=torch.tensor(zero_point)) + else: + _zero_point = None + + # Support variable length arguments by marking unspecified ones as non-quantized + arg_q_descriptors_extended = arg_q_descriptors + (False,) * ( + len(args) - len(arg_q_descriptors) + ) + descriptor_args = tuple(zip(arg_q_descriptors_extended, args)) + + def _is_arg_quantized(descriptor, arg): + return descriptor and _is_value(arg) and _is_tuple_construct(arg) + + # Run regular symbolic function if none of the argument is QTensor. + is_quantized = list() + for descriptor, arg in descriptor_args: + # ListConstruct + if _is_packed_list(arg): + for arg_input in arg.node().inputs(): + is_quantized.append(_is_arg_quantized(descriptor, arg_input)) + else: + is_quantized.append(_is_arg_quantized(descriptor, arg)) + + if not any(is_quantized): + return fn(g, *args, **kwargs) + + # Dequantize arguments that are quantized + non_quantized_args = [] + for descriptor, arg in descriptor_args: + if _is_arg_quantized(descriptor, arg): + # Quantized arg is a tuple of (value, scale, zero_point) + dequantized_arg, arg_scale, arg_zero_point, _ = dequantize_helper( + g, arg + ) + non_quantized_args.append(dequantized_arg) + # Set scale and zero_point to the first quantized input if not already set + if _scale is None: + _scale = arg_scale + if _zero_point is None: + _zero_point = arg_zero_point + # ListConstruct + elif _is_packed_list(arg): + for arg_input in arg.node().inputs(): + if _is_arg_quantized(descriptor, arg_input): + # Quantized arg is a tuple of (value, scale, zero_point) + ( + dequantized_arg, + arg_scale, + arg_zero_point, + _, + ) = dequantize_helper(g, arg_input) + # Set scale and zero_point to the first quantized input if not already set + if _scale is None: + _scale = arg_scale + if _zero_point is None: + _zero_point = arg_zero_point + arg_input.replaceAllUsesWith(dequantized_arg) + non_quantized_args.append(arg) + else: + # Non-quantized arg + non_quantized_args.append(arg) + # TODO(justinchuby): Only single output is supported for now. We may want to + # support multiple outputs in the future. 
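+ # Run the base symbolic function on the dequantized arguments, then
+ # re-quantize its single output using the scale/zero_point resolved above
+ # (either the explicit decorator values or those of the first quantized input).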
+ output = fn(g, *non_quantized_args, **kwargs) + + assert _scale is not None, "Bug: Scale must be set for quantized operator" + assert ( + _zero_point is not None + ), "Bug: Zero point must be set for quantized operator" + + if quantize_output: + return quantize_helper(g, output, _scale, _zero_point) + return output + + return wrapper + + return decorator + + +@_beartype.beartype +def _scalar(x: Any) -> Optional[Number]: + """Convert a scalar tensor into a Python value.""" + if isinstance(x, torch.Tensor) and x.shape == (): + return x.item() + return None + + +@_beartype.beartype +def _if_scalar_type_as(self, tensor): + """ + Convert self into the same type of tensor, as necessary. + We only support implicit casting for scalars, so we never + actually need to insert an ONNX cast operator here; just + fix up the scalar. + """ + if isinstance(self, _C.Value): + return self + + scalar_type = _type_utils.JitScalarType.from_value( + tensor, _type_utils.JitScalarType.UNDEFINED + ) + if scalar_type != _type_utils.JitScalarType.UNDEFINED: + ty = scalar_type.scalar_name().lower() + return getattr(self, ty)() + return self + + +@_beartype.beartype +def _is_none(x: Any) -> bool: + return x is None or (x.node().mustBeNone() if isinstance(x, _C.Value) else False) + + +@_beartype.beartype +def _is_value(x: Any) -> bool: + return isinstance(x, _C.Value) + + +@_beartype.beartype +def _is_constant(value: Any) -> bool: + return not _is_value(value) or value.node().kind() in { + "onnx::Constant", + "prim::Constant", + } + + +@_beartype.beartype +def _is_tensor(x: _C.Value) -> bool: + return x.type().isSubtypeOf(_C.TensorType.get()) + + +# Note: _C.JitType is not exposed to Python and cannot be checked in runtime. +def _as_list_type(jit_type: _C.JitType) -> Optional[_C.ListType]: + if isinstance(jit_type, _C.ListType): + return jit_type + return None + + +@_beartype.beartype +def _is_list(x: _C.Value) -> bool: + return _as_list_type(x.type()) is not None + + +@_beartype.beartype +def _is_tensor_list(x: _C.Value) -> bool: + x_type = _as_list_type(x.type()) + if x_type is None: + return False + return isinstance(x_type.getElementType(), _C.TensorType) + + +@_beartype.beartype +def _is_scalar_list(x: _C.Value) -> bool: + """Checks if x is a scalar list, for example: List[float], List[int]. + + Besides checking the type is ListType, we also check if the data type is + a valid ONNX data type. 
+ """ + x_type = _as_list_type(x.type()) + if x_type is None: + return False + scalar_type = _type_utils.JitScalarType.from_value(x) + return scalar_type.onnx_compatible() + + +@_beartype.beartype +def _is_tuple_construct(x: _C.Value) -> bool: + return x.node().kind() == "prim::TupleConstruct" + + +@_beartype.beartype +def is_complex_value(x: _C.Value) -> bool: + assert _is_value(x) + return _type_utils.JitScalarType.from_value( + x, _type_utils.JitScalarType.UNDEFINED + ) in { + _type_utils.JitScalarType.COMPLEX32, + _type_utils.JitScalarType.COMPLEX64, + _type_utils.JitScalarType.COMPLEX128, + } + + +@_beartype.beartype +def is_caffe2_aten_fallback() -> bool: + return ( + GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + and _C_onnx._CAFFE2_ATEN_FALLBACK + ) + + +@_beartype.beartype +def _get_tensor_rank(x: _C.Value) -> Optional[int]: + if not _is_tensor(x) or x.type() is None: + return None + x_type = x.type() + x_type = typing.cast(_C.TensorType, x_type) + return x_type.dim() + + +@_beartype.beartype +def _get_tensor_sizes(x: _C.Value, allow_nonstatic: bool = True): + if not _is_tensor(x) or x.type() is None: + return None + x_type = x.type() + x_type = typing.cast(_C.TensorType, x_type) + if allow_nonstatic: + # Each individual symbol is returned as None. + # e.g. [1, "a", "b"] -> [1, None, None] + return x_type.varyingSizes() + # returns None, if exists any symbol in sizes. + # e.g. [1, "a", "b"] -> None + return x_type.sizes() + + +@_beartype.beartype +def _get_tensor_dim_size(x: _C.Value, dim: int) -> Optional[int]: + sizes = _get_tensor_sizes(x) + return sizes[dim] if sizes else None + + +@_beartype.beartype +def _get_dim_for_cross(x: _C.Value, dim: Optional[int]): + if dim == -1: + tensor_rank = _get_tensor_rank(x) + assert tensor_rank is not None + return dim + tensor_rank + # If dim is not given, it defaults to the first dimension found with the size 3 + if dim is None: + sizes = _get_tensor_sizes(x) + assert sizes is not None + for index, size in enumerate(sizes): + if size is not None and size == 3: + return index + return dim + + +@_beartype.beartype +def _unimplemented(op: str, msg: str, value: Optional[_C.Value] = None) -> None: + # For BC reasons, the behavior for Caffe2 does not raise exception for unimplemented operators + if _C_onnx._CAFFE2_ATEN_FALLBACK: + warnings.warn(f"ONNX export failed on {op} because {msg} not supported") + elif GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX: + _onnx_unsupported(f"{op}, {msg}", value) + + +@_beartype.beartype +def _onnx_unsupported(op_name: str, value: Optional[_C.Value] = None) -> NoReturn: + message = ( + f"Unsupported: ONNX export of operator {op_name}. " + f"Please feel free to request support or submit a pull request " + f"on PyTorch GitHub: {_constants.PYTORCH_GITHUB_ISSUES_URL}" + ) + if isinstance(value, _C.Value): + raise errors.SymbolicValueError( + message, + value, + ) + raise errors.OnnxExporterError(message) + + +@_beartype.beartype +def _onnx_opset_unsupported( + op_name: str, + current_opset: int, + supported_opset: int, + value: Optional[_C.Value] = None, +) -> NoReturn: + message = ( + f"Unsupported: ONNX export of {op_name} in opset {current_opset}. " + f"Please try opset version {supported_opset}." 
+ ) + if isinstance(value, _C.Value): + raise errors.SymbolicValueError( + message, + value, + ) + raise errors.OnnxExporterError(message) + + +@_beartype.beartype +def _onnx_opset_unsupported_detailed( + op_name: str, + current_opset: int, + supported_opset: int, + reason: str, + value: Optional[_C.Value] = None, +) -> NoReturn: + message = ( + f"Unsupported: ONNX export of {op_name} in " + f"opset {current_opset}. {reason}. Please try opset version {supported_opset}." + ) + if isinstance(value, _C.Value): + raise errors.SymbolicValueError( + message, + value, + ) + raise errors.OnnxExporterError(message) + + +@_beartype.beartype +def _block_list_in_opset(name: str): + def symbolic_fn(*args, **kwargs): + raise errors.OnnxExporterError( + f"ONNX export failed on {name}, which is not implemented for opset " + f"{GLOBALS.export_onnx_opset_version}. " + "Try exporting with other opset versions." + ) + + return symbolic_fn + + +@_beartype.beartype +def _try_get_scalar_type(*args) -> Optional[_type_utils.JitScalarType]: + for arg in args: + scalar_type = _type_utils.JitScalarType.from_value( + arg, _type_utils.JitScalarType.UNDEFINED + ) + if scalar_type != _type_utils.JitScalarType.UNDEFINED: + return scalar_type + return None + + +@_beartype.beartype +def _select_helper(g: jit_utils.GraphContext, self, dim, index, apply_reshape=True): + index_const = _maybe_get_scalar(index) + index_dim = _get_tensor_rank(index) + if not _is_value(index_const): + # Index is a constant scalar. Make it a size 1 constant tensor. + index = g.op("Constant", value_t=torch.LongTensor([index_const])) + elif index_dim is not None and apply_reshape: + if index_dim == 0: + # Index is a scalar. Reshape it to a size 1 tensor. + index = _reshape_helper( + g, index, g.op("Constant", value_t=torch.LongTensor([1])) + ) + + index_scalar_type = _type_utils.JitScalarType.from_value( + index, _type_utils.JitScalarType.UNDEFINED + ) + if index_scalar_type not in { + _type_utils.JitScalarType.INT64, + _type_utils.JitScalarType.INT, + }: + index = g.op("Cast", index, to_i=_C_onnx.TensorProtoDataType.INT64) + return g.op("Gather", self, index, axis_i=dim) + + +@_beartype.beartype +def _slice_helper( + g: jit_utils.GraphContext, + input, + axes, + starts, + ends, + steps=None, +): + if g.opset <= 9: + from torch.onnx.symbolic_opset9 import _slice as _slice9 + + return _slice9(g, input, axes, starts, ends) + else: + from torch.onnx.symbolic_opset10 import _slice as _slice10 + + return _slice10(g, input, axes, starts, ends, steps) + + +@_beartype.beartype +def _is_fp(value) -> bool: + return _type_utils.JitScalarType.from_value( + value, _type_utils.JitScalarType.UNDEFINED + ) in { + _type_utils.JitScalarType.FLOAT, + _type_utils.JitScalarType.DOUBLE, + _type_utils.JitScalarType.HALF, + _type_utils.JitScalarType.BFLOAT16, + } + + +@_beartype.beartype +def _is_bool(value) -> bool: + return _type_utils.JitScalarType.from_value( + value, _type_utils.JitScalarType.UNDEFINED + ) in {_type_utils.JitScalarType.BOOL} + + +@_beartype.beartype +def _generate_wrapped_number(g: jit_utils.GraphContext, scalar): + """Creates a wrapped number based on https://github.com/pytorch/pytorch/issues/9515. + + A Tensor is a considered a "wrapped number" if it is + auto-wrapped from a C++ or Python number type. Integer types are + wrapped as 0-dim int64 tensors and floating-point types are + wrapped as 0-dim double tensors. + + The input to this function is constant value. 
If the data type + is a floating point type, it is converted to a 0-dim double + tensor, else it is converted to a 0-dim tensor of its original type + """ + assert not isinstance(scalar, torch.Tensor) + if isinstance(scalar, float): + return g.op("Constant", value_t=torch.tensor(scalar, dtype=torch.double)) + return g.op("Constant", value_t=torch.tensor(scalar)) + + +@_beartype.beartype +def _sort_helper(g: jit_utils.GraphContext, input, dim, decending=True, out=None): + if out is not None: + _unimplemented("Sort", "Out parameter is not supported") + shape_ = g.op("Shape", input) + dim_size_ = g.op( + "Gather", + shape_, + g.op("Constant", value_t=torch.tensor([dim], dtype=torch.int64)), + ) + if g.opset <= 10: + if not decending: + _unimplemented("Sort", "Ascending is not supported") + return g.op("TopK", input, dim_size_, axis_i=dim, outputs=2) + else: + return g.op( + "TopK", input, dim_size_, axis_i=dim, largest_i=decending, outputs=2 + ) + + +@_beartype.beartype +def _topk_helper( + g: jit_utils.GraphContext, input, k, dim, largest=True, sorted=False, out=None +): + if out is not None: + _unimplemented("TopK", "Out parameter is not supported") + if not _is_value(k): + k = g.op("Constant", value_t=torch.tensor([k], dtype=torch.int64)) + else: + k = _reshape_helper(g, k, g.op("Constant", value_t=torch.tensor([1]))) + if _try_get_scalar_type(k) != _type_utils.JitScalarType.INT64: + k = g.op("Cast", k, to_i=_C_onnx.TensorProtoDataType.INT64) + if g.opset <= 10: + if not largest: + _unimplemented("TopK", "Ascending is not supported") + return g.op("TopK", input, k, axis_i=dim, outputs=2) + else: + return g.op( + "TopK", input, k, axis_i=dim, largest_i=largest, sorted_i=sorted, outputs=2 + ) + + +@_beartype.beartype +def _lt_helper(g: jit_utils.GraphContext, input, other): + if g.opset <= 8: + from torch.onnx.symbolic_opset8 import lt as _lt8 + + return _lt8(g, input, other) + else: + from torch.onnx.symbolic_opset9 import lt as _lt9 + + return _lt9(g, input, other) + + +@_beartype.beartype +def _interpolate_warning(interpolate_mode): + onnx_op = ( + "onnx:Resize" if GLOBALS.export_onnx_opset_version >= 10 else "onnx:Upsample" + ) + warnings.warn( + "You are trying to export the model with " + + onnx_op + + " for ONNX opset version " + "" + str(GLOBALS.export_onnx_opset_version) + ". " + "This operator might cause results to not match the expected results by PyTorch.\n" + "ONNX's Upsample/Resize operator did not match Pytorch's Interpolation until opset 11. " + "Attributes to determine how to transform the input were added in onnx:Resize in opset 11 " + "to support Pytorch's behavior (like coordinate_transformation_mode and nearest_mode).\n" + "We recommend using opset 11 and above for models using this operator." 
+ ) + + +@_beartype.beartype +def _unsqueeze_helper(g: jit_utils.GraphContext, input, axes_i): + if _is_constant(axes_i[0]): + if g.opset >= 13: + axes = g.op("Constant", value_t=torch.tensor(axes_i, dtype=torch.long)) + return g.op("Unsqueeze", input, axes) + return g.op("Unsqueeze", input, axes_i=axes_i) + # Tensor type + if g.opset < 13: + raise errors.SymbolicValueError( + "Opset version must be >= 13 for Unsqueeze with dynamic axes.", input + ) + return g.op("Unsqueeze", input, axes_i[0]) + + +@_beartype.beartype +def _squeeze_helper(g: jit_utils.GraphContext, input, axes_i): + if _is_constant(axes_i[0]): + if g.opset >= 13: + axes = g.op("Constant", value_t=torch.tensor(axes_i, dtype=torch.long)) + return g.op("Squeeze", input, axes) + return g.op("Squeeze", input, axes_i=axes_i) + # Tensor type + if g.opset < 13: + raise errors.SymbolicValueError( + "Opset version must be >= 13 for Squeeze with dynamic axes.", input + ) + axes_t = axes_i[0] + axes_rank = _get_tensor_rank(axes_t) + assert axes_rank is not None + if axes_rank > 1: + raise errors.SymbolicValueError( + "For Squeeze axses as input, the axes rank must be one in ONNX spec.", input + ) + elif axes_rank == 0: + # The axes is a scalar. Unsqueeze it to a rank 1 tensor. + axes_t = _unsqueeze_helper(g, axes_t, [0]) + return g.op("Squeeze", input, axes_t) + return g.op("Squeeze", input, axes_t) + + +@_beartype.beartype +def _reducesum_helper( + g: jit_utils.GraphContext, + input, + axes_i=None, + keepdims_i=1, + noop_with_empty_axes_i=0, +): + keepdims_i = _maybe_get_const(keepdims_i, "i") + if g.opset >= 13: + if axes_i: + if not _is_value(axes_i): + axes_i = g.op( + "Constant", value_t=torch.tensor(axes_i, dtype=torch.long) + ) + return g.op( + "ReduceSum", + input, + axes_i, + keepdims_i=keepdims_i, + noop_with_empty_axes_i=noop_with_empty_axes_i, + ) + return g.op( + "ReduceSum", + input, + keepdims_i=keepdims_i, + noop_with_empty_axes_i=noop_with_empty_axes_i, + ) + else: + return g.op("ReduceSum", input, axes_i=axes_i, keepdims_i=keepdims_i) + + +@_beartype.beartype +def _interpolate_size_to_scales(g: jit_utils.GraphContext, input, output_size, dim): + output_size = _maybe_get_const(output_size, "is") + if _is_value(output_size): + offset = 2 + offsets = g.op("Constant", value_t=torch.ones(offset, dtype=torch.float32)) + dividend = g.op("Cast", output_size, to_i=_C_onnx.TensorProtoDataType.FLOAT) + divisor = _slice_helper( + g, g.op("Shape", input), axes=[0], ends=[sys.maxsize], starts=[offset] + ) + divisor = g.op("Cast", divisor, to_i=_C_onnx.TensorProtoDataType.FLOAT) + scale_dims = g.op("Div", dividend, divisor) + scales = g.op("Concat", offsets, scale_dims, axis_i=0) + else: + scales_constant = [ + 1.0 + if i < 2 + else float(output_size[-(dim - i)]) + / float(input.type().sizes()[-(dim - i)]) + for i in range(0, dim) + ] + scales = g.op( + "Constant", value_t=torch.tensor(scales_constant, dtype=torch.float32) + ) + return scales + + +@_beartype.beartype +def _interpolate_get_scales_if_available(g: jit_utils.GraphContext, scales): + available_scales = _maybe_get_const(scales[0], "fs") != -1 and not _is_none( + scales[0] + ) + + if not available_scales: + return None + + offsets = g.op("Constant", value_t=torch.ones(2, dtype=torch.float32)) + scales_list = g.op( + "Constant", value_t=torch.tensor(_maybe_get_const(scales[0], "fs")) + ) + scales = g.op("Concat", offsets, scales_list, axis_i=0) + return scales + + +@_beartype.beartype +def _get_interpolate_attributes(g: jit_utils.GraphContext, mode, args): + if mode == 
"nearest": + align_corners = None + scales = args[0:] + else: + align_corners = args[0] + scales = args[1:] + scales = _interpolate_get_scales_if_available(g, scales) + return scales, align_corners + + +@_beartype.beartype +def _interpolate_get_scales(g: jit_utils.GraphContext, scale_factor, dim): + offsets = g.op("Constant", value_t=torch.ones(2, dtype=torch.float32)) + scale_factor_rank = _get_tensor_rank(scale_factor) + if isinstance(scale_factor.type(), _C.ListType) or ( + scale_factor_rank is not None and scale_factor_rank > 0 + ): + return g.op("Concat", offsets, scale_factor, axis_i=0) + else: + scale_factor = _unsqueeze_helper(g, scale_factor, [0]) + scale_factor = g.op( + "Cast", scale_factor, to_i=_C_onnx.TensorProtoDataType.FLOAT + ) + scales = [scale_factor for i in range(dim - 2)] + scale_factor = g.op("Concat", offsets, *scales, axis_i=0) + return scale_factor + + +@_beartype.beartype +def _interpolate_get_scales_and_mode( + g: jit_utils.GraphContext, input, size, scale_factor, mode, align_corners +): + mode = _maybe_get_const(mode, "s") + if "linear" in mode: + mode = "linear" + if "cubic" in mode: + mode = "cubic" + _interpolate_warning(mode) + + align_corners = _maybe_get_const(align_corners, "b") + if isinstance(align_corners, bool) and align_corners: + return _unimplemented("interpolate", "align_corners == True") + + if not input.type().dim(): + return _unimplemented("interpolate", "missing input shape") + dim = input.type().dim() + + if not _is_none(scale_factor): + scale_factor = _interpolate_get_scales(g, scale_factor, dim) + elif not _is_none(size): + if not _is_packed_list(size): + is_scalar = _maybe_get_const(size, "t").dim() == 0 + if is_scalar: + size = _unsqueeze_helper(g, size, [0]) + size = [size for i in range(dim - 2)] + size = g.op("Concat", *size, axis_i=0) + scale_factor = _interpolate_size_to_scales(g, input, size, dim) + else: + return _unimplemented( + "interpolate", "Both size and scales are None in __interpolate" + ) + return scale_factor, mode + + +@_beartype.beartype +def _argmin_argmax_helper( + g: jit_utils.GraphContext, + input: torch._C.Value, + dim: torch._C.Value, + keepdim: bool, + op_name: str, +): + def op_wrapper(input, axis_i, keepdims_i): + if g.opset >= 12: + return g.op( + op_name, + input, + axis_i=axis_i, + keepdims_i=keepdims_i, + select_last_index_i=False, + ) + return g.op(op_name, input, axis_i=axis_i, keepdims_i=keepdims_i) + + if _is_none(dim): + flattened = _reshape_helper( + g, input, g.op("Constant", value_t=torch.tensor([-1])) + ) + output = op_wrapper(flattened, axis_i=0, keepdims_i=False) + if keepdim: + input_shape = g.op("Shape", input) + input_shape_shape = g.op("Shape", input_shape) + new_shape = g.op( + "ConstantOfShape", + input_shape_shape, + value_t=torch.tensor([1], dtype=torch.int64), + ) + output = g.op("Reshape", output, new_shape) + return output + + dim = _parse_arg(dim, "i") + return op_wrapper(input, axis_i=dim, keepdims_i=keepdim) + + +@_beartype.beartype +def _interpolate_helper(name, dim, interpolate_mode): + @quantized_args(True, False, False) + def symbolic_fn(g, input, output_size, *args): + scales, align_corners = _get_interpolate_attributes(g, interpolate_mode, args) + align_corners = _maybe_get_scalar(align_corners) + coordinate_transformation_mode = ( + "asymmetric" + if interpolate_mode == "nearest" + else "align_corners" + if align_corners + else "half_pixel" + ) + + if scales is None: + input_size = g.op("Shape", input) + input_size_beg = _slice_helper( + g, input_size, axes=[0], ends=[2], 
starts=[0] + ) + output_size = g.op( + "Cast", output_size, to_i=_C_onnx.TensorProtoDataType.INT64 + ) + output_size = g.op("Concat", input_size_beg, output_size, axis_i=0) + + if g.opset >= 13: + empty_roi = _optional_input_placeholder_tensor(g) + empty_scales = _optional_input_placeholder_tensor(g) + else: + empty_roi = g.op( + "Constant", value_t=torch.tensor([], dtype=torch.float32) + ) + empty_scales = g.op( + "Constant", value_t=torch.tensor([], dtype=torch.float32) + ) + + return g.op( + "Resize", + input, + empty_roi, + empty_scales, + output_size, + coordinate_transformation_mode_s=coordinate_transformation_mode, + cubic_coeff_a_f=-0.75, # only valid when mode="cubic" + mode_s=interpolate_mode, # nearest, linear, or cubic + nearest_mode_s="floor", + ) # only valid when mode="nearest" + else: + if g.opset >= 13: + empty_roi = _optional_input_placeholder_tensor(g) + else: + empty_roi = g.op( + "Constant", value_t=torch.tensor([], dtype=torch.float32) + ) + + return g.op( + "Resize", + input, + empty_roi, + scales, + coordinate_transformation_mode_s=coordinate_transformation_mode, + cubic_coeff_a_f=-0.75, # only valid when mode="cubic" + mode_s=interpolate_mode, # nearest, linear, or cubic + nearest_mode_s="floor", + ) # only valid when mode="nearest" + + return symbolic_fn + + +@_beartype.beartype +def __interpolate_helper( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, +): + mode = _maybe_get_const(mode, "s") + if "linear" in mode: + mode = "linear" + if "cubic" in mode: + mode = "cubic" + align_corners = _maybe_get_const(align_corners, "b") + align_corners = False if not isinstance(align_corners, bool) else align_corners + coordinate_transformation_mode = ( + "asymmetric" + if mode == "nearest" + else "align_corners" + if align_corners + else "half_pixel" + ) + + if not _is_none(size): + input_size = g.op("Shape", input) + input_size = _slice_helper(g, input_size, axes=[0], ends=[2], starts=[0]) + # in some cases size is not a packed list but size is a scalar + # We need to also verify that (_maybe_get_const(size, "t").dim() == 0) + # but this information is not always available. Try to get the dim, + # and if not assume that it is not a scalar. + try: + is_scalar = not _is_packed_list(size) and ( + _maybe_get_const(size, "t").dim() == 0 + ) + except AttributeError: + is_scalar = not _is_packed_list(size) + if not is_scalar: + warnings.warn( + "Cannot verify if the output_size is a scalar " + "while exporting interpolate. Assuming that it is not a scalar." 
+ ) + + if is_scalar: + rank = _get_tensor_rank(input) + if rank is None: + return _unimplemented( + "interpolate (with a scalar output_size)", + "missing input shape (try giving an array of output_size values)", + ) + size = _unsqueeze_helper(g, size, [0]) + size = [size for i in range(rank - 2)] + size = g.op("Concat", *size, axis_i=0) + size = g.op("Cast", size, to_i=_C_onnx.TensorProtoDataType.INT64) + size = g.op("Concat", input_size, size, axis_i=0) + + if g.opset >= 13: + empty_roi = _optional_input_placeholder_tensor(g) + empty_scales = _optional_input_placeholder_tensor(g) + else: + empty_roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32)) + empty_scales = g.op( + "Constant", value_t=torch.tensor([], dtype=torch.float32) + ) + + return g.op( + "Resize", + input, + empty_roi, + empty_scales, + size, + coordinate_transformation_mode_s=coordinate_transformation_mode, + cubic_coeff_a_f=-0.75, # only valid when mode="cubic" + mode_s=mode, # nearest, linear, or cubic + nearest_mode_s="floor", + ) + else: # if not _is_none(scales) + rank = _get_tensor_rank(input) + if rank is None: + return _unimplemented("interpolate (with scales)", "missing input shape") + + if g.opset >= 13: + empty_roi = _optional_input_placeholder_tensor(g) + else: + empty_roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32)) + + scales = _interpolate_get_scales(g, scale_factor, rank) + return g.op( + "Resize", + input, + empty_roi, + scales, + coordinate_transformation_mode_s=coordinate_transformation_mode, + cubic_coeff_a_f=-0.75, # only valid when mode="cubic" + mode_s=mode, # nearest, linear, or cubic + nearest_mode_s="floor", + ) # only valid when mode="nearest" + + +@_beartype.beartype +def _unbind_helper(g: jit_utils.GraphContext, self, dim, _outputs): + if g.opset < 11: + from torch.onnx.symbolic_opset9 import unbind + elif g.opset <= 12: + from torch.onnx.symbolic_opset11 import unbind # type: ignore[no-redef] + else: + from torch.onnx.symbolic_opset13 import unbind # type: ignore[no-redef] + return unbind(g, self, dim, _outputs) + + +@_beartype.beartype +def _scatter_helper(g: jit_utils.GraphContext, self, dim, index, src): + if g.opset <= 10: + from torch.onnx.symbolic_opset9 import scatter + else: + # for mypy, scatter was imported two lines above + from torch.onnx.symbolic_opset11 import scatter # type: ignore[no-redef] + return scatter(g, self, dim, index, src) + + +@_beartype.beartype +def _repeat_interleave_split_helper(g: jit_utils.GraphContext, self, reps, dim): + if g.opset <= 12: + split_out = g.op("Split", self, split_i=[1] * reps, axis_i=dim, outputs=reps) + else: + from torch.onnx.symbolic_opset13 import split + + repeats = g.op("Constant", value_t=torch.tensor([1] * reps)) + split_out = split(g, self, repeats, dim, _outputs=reps) + return split_out if reps > 1 else [split_out] + + +@_beartype.beartype +def _repeat_interleave_single_value_repeat_helper( + g: jit_utils.GraphContext, self, repeats, dim +): + from torch.onnx.symbolic_opset9 import flatten, unsqueeze + + if not _is_tensor(repeats): + repeats = g.op("Constant", value_t=torch.LongTensor(repeats)) + + const_repeats: bool = _is_constant(repeats) + reps = _maybe_get_const(repeats, "t") + + # Convert 'repeats' to 1-d if it is 0-d. + if _get_tensor_rank(repeats) == 0: + repeats = g.op("Reshape", repeats, g.op("Constant", value_t=torch.tensor([1]))) + + # Create a new dim of size 1, then expand it to be 'repeats' long, and finally collapse it. 
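+ # For example, self of shape (2, 3) with dim=1 and repeats=4:
+ # unsqueeze -> (2, 3, 1), Tile by (1, 1, 4) -> (2, 3, 4), flatten dims 1..2 -> (2, 12),
+ # so each element along dim 1 is repeated 4 consecutive times.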
+ unsqueezed = unsqueeze(g, self, dim + 1) + + # repeats_per_dim is 1 for all dims except for the new unsqueezed dim, where it has value 'repeats'. + if const_repeats: + # 'Repeats' is a constant, 'repeats_per_dim' can be a constant. + onehot = torch.ones(_get_tensor_rank(unsqueezed), dtype=torch.int64) + onehot[dim + 1] = reps + repeats_per_dim = g.op("Constant", value_t=onehot) + else: + # 'Repeats' is a variable, 'repeats_per_dim' cannot be a constant. + onehot = g.op( + "OneHot", + unsqueeze(g, dim + 1, 0), # indices, must be >= 1-dimensional + g.op( + "Constant", value_t=torch.tensor(_get_tensor_rank(unsqueezed)) + ), # depth + g.op( + "Concat", g.op("Constant", value_t=torch.tensor([1])), repeats, axis_i=0 + ), # on/off values + ) + repeats_per_dim = flatten(g, onehot, 0, 1) + + tiled = g.op("Tile", unsqueezed, repeats_per_dim) + return flatten(g, tiled, dim, dim + 1) + + +@_beartype.beartype +def _arange_cast_helper( + g: jit_utils.GraphContext, end, start=None, step=None, dtype=None +) -> Tuple[ + _type_utils.JitScalarType, + Optional[_C.Value], + Optional[_C.Value], + Optional[_C.Value], +]: + def _is_all_integral(scalars): + for scalar in scalars: + scalar_type = _type_utils.JitScalarType.from_value( + scalar, _type_utils.JitScalarType.UNDEFINED + ) + if ( + scalar_type != _type_utils.JitScalarType.INT64 + and scalar_type != _type_utils.JitScalarType.UNDEFINED + ): + return False + return True + + # This logic is based on torch.arange docs. If "dtype" is provided, + # infer input types from dtype. If not, then check if any of start, stop, + # or step are floating point, and infer the type from get_default. + # Otherwise, the dtype is inferred to be torch.int64. + if dtype is None or (_is_value(dtype) and _is_none(dtype)): + if _is_all_integral([start, end, step]): + scalar_type = _type_utils.JitScalarType.INT64 + else: + scalar_type = _type_utils.JitScalarType.from_dtype( + torch.get_default_dtype() + ) + else: + assert isinstance(dtype, int) + # TODO(justinchuby): Check if dtype is indeed a int. + scalar_type = _type_utils.JitScalarType(dtype) + + start = g.op("Cast", start, to_i=scalar_type.onnx_type()) if start else None + end = g.op("Cast", end, to_i=scalar_type.onnx_type()) if end else None + step = g.op("Cast", step, to_i=scalar_type.onnx_type()) if step else None + return scalar_type, end, start, step + + +@_beartype.beartype +def _arange_helper(g: jit_utils.GraphContext, *args): + if g.opset <= 10: + from torch.onnx.symbolic_opset9 import arange + else: + from torch.onnx.symbolic_opset11 import arange # type: ignore[no-redef] + return arange(g, *args) + + +@_beartype.beartype +def _size_helper(g: jit_utils.GraphContext, self, dim): + full_shape = g.op("Shape", self) + from torch.onnx.symbolic_opset9 import select + + return select(g, full_shape, g.op("Constant", value_t=torch.tensor([0])), dim) + + +@_beartype.beartype +def _index_fill_reshape_helper(g: jit_utils.GraphContext, self, dim, index): + # 1. reshape index => [1, ..., 1, dim, 1, ..., 1] + # 2. expand index => [..., dim, ...], same shape as self except for dim. + # 3. expand value as well. + # 4. apply onnx::scatter. 
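+ # For example, with self of shape (4, 5), dim=1 and an index of length 2,
+ # the index is reshaped to (1, 2) and expanded to (4, 2) (self's shape with
+ # size 2 at dim 1); the caller expands the fill value to the same shape.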
+ + from torch.onnx.symbolic_opset9 import expand + + if g.opset <= 10: + from torch.onnx.symbolic_opset9 import scatter + else: + # for mypy, scatter was imported two lines above + from torch.onnx.symbolic_opset11 import scatter # type: ignore[no-redef] + + if self.type().dim() is None: + return _unimplemented("index_fill", "input rank not accessible") + self_dim = self.type().dim() + dim_value = _parse_arg(dim, "i") + if dim_value < 0: + dim_value += self_dim + unsqueezed_index = _unsqueeze_helper( + g, index, [i for i in range(self_dim) if i != dim_value] + ) + expanded_index_shape = scatter( + g, g.op("Shape", self), 0, _unsqueeze_helper(g, dim, [0]), g.op("Shape", index) + ) + expanded_index = expand(g, unsqueezed_index, expanded_index_shape, None) + return expanded_index_shape, expanded_index + + +# By default, when any value in the 'shape' input is equal to zero +# the corresponding dimension value is copied from the input tensor dynamically. +# allowzero=1 indicates that if any value in the 'shape' input is set to zero, +# the zero value is honored, similar to NumPy. +# allowzero=1 is only supported for opset version >= 14. +@_beartype.beartype +def _reshape_helper(g: jit_utils.GraphContext, input, shape, allowzero=0): + shape = _maybe_get_const(shape, "is") + if not _is_value(shape): + shape = g.op("Constant", value_t=torch.LongTensor(shape)) + if g.opset <= 13: + if allowzero == 1: + _onnx_opset_unsupported( + "Reshape with allowzero=1", GLOBALS.export_onnx_opset_version, 14, input + ) + return g.op("Reshape", input, shape) + else: + return g.op("Reshape", input, shape, allowzero_i=allowzero) + + +@_beartype.beartype +def _batchnorm_helper( + g: jit_utils.GraphContext, input, weight, bias, running_mean, running_var +): + from torch.onnx.symbolic_opset9 import _var_mean + + batch_size = _get_tensor_dim_size(input, 0) + channel_size = _get_tensor_dim_size(input, 1) + + if weight is None or _is_none(weight): + if channel_size is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of batch_norm for unknown channel size.", + input, + ) + weight_value = torch.tensor( + [1.0] * channel_size, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ) + weight = g.op("Constant", value_t=weight_value) + if bias is None or _is_none(bias): + if channel_size is None: + raise errors.SymbolicValueError( + "Unsupported: ONNX export of batch_norm for unknown channel size.", + input, + ) + bias_value = torch.tensor( + [0.0] * channel_size, + dtype=_type_utils.JitScalarType.from_value(input).dtype(), + ) + bias = g.op("Constant", value_t=bias_value) + # If track_running_stats is set to False batch statistics are instead used during evaluation time + if ( + running_mean is None + or _is_none(running_mean) + or running_var is None + or _is_none(running_var) + ): + assert batch_size is not None and channel_size is not None + reshape_in = _reshape_helper( + g, + input, + g.op( + "Constant", + value_t=torch.tensor([batch_size, channel_size, -1], dtype=torch.int64), + ), + ) + trans_in = g.op("Transpose", reshape_in, perm_i=[0, 2, 1]) + running_var, running_mean = _var_mean( + g, + trans_in, + g.op("Constant", value_t=torch.tensor([0, 1], dtype=torch.int64)), + False, + False, + ) + return weight, bias, running_mean, running_var + + +@_beartype.beartype +def _avgpool_helper( + tuple_fn: Callable[[Any], Sequence[int]], + padding: Union[int, Sequence[int]], + kernel_size, + stride, + divisor_override, + name, +) -> Tuple[int, ...]: + if divisor_override and 
divisor_override.node().kind() != "prim::Constant": + _unimplemented(name, "divisor_override") + return tuple(tuple_fn(padding)) + + +@_beartype.beartype +def check_training_mode(op_train_mode: int, op_name: str) -> None: + """Warns the user if the model's training mode and the export mode do not agree.""" + if GLOBALS.training_mode == _C_onnx.TrainingMode.PRESERVE: + return + + if op_train_mode: + op_mode_enum = _C_onnx.TrainingMode.TRAINING + else: + op_mode_enum = _C_onnx.TrainingMode.EVAL + if op_mode_enum == GLOBALS.training_mode: + # The modes agree. Do nothing + return + + op_mode_text = f"train={bool(op_train_mode)}" + # Setting the model mode could result in op_mode != GLOBALS.training_mode + # if the model is a FuncModule. In this case we warn the user of + # the state and export depending on op_mode + # This is to support use-cases of fixing certain layer weights + # in training. + warnings.warn( + f"ONNX export mode is set to {GLOBALS.training_mode}, but operator '{op_name}' " + f"is set to {op_mode_text}. Exporting with {op_mode_text}." + ) + + +@_beartype.beartype +def _flatten_helper(g: jit_utils.GraphContext, input, start_dim, end_dim, dim): + input_size = g.op("Shape", input) + slice1 = _slice_helper(g, input_size, axes=[0], starts=[0], ends=[start_dim]) + slices = [slice1, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long))] + if end_dim < dim - 1: + slice3 = _slice_helper( + g, input_size, axes=[0], starts=[end_dim + 1], ends=[dim] + ) + slices = [ + slice1, + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)), + slice3, + ] + + final_shape = g.op("Concat", *slices, axis_i=0) + from torch.onnx.symbolic_opset9 import _reshape_from_tensor + + return _reshape_from_tensor(g, input, final_shape) + + +@_beartype.beartype +def _is_split_static(split_size_or_sizes, _outputs): + if _outputs is None: + return False + if ( + _is_value(split_size_or_sizes) + and split_size_or_sizes.node().kind() != "onnx::Constant" + ): + return False + return True + + +@_beartype.beartype +def _optional_input_placeholder_tensor(g): + n = g.op("prim::Constant") + n.setType(_C.OptionalType.ofTensor()) + return n + + +@_beartype.beartype +def _handle_reduce_dim_none(g: jit_utils.GraphContext, self, op_name): + rank = _get_tensor_rank(self) + if rank is not None and any( + _get_tensor_dim_size(self, i) == 0 for i in range(rank) + ): + # If input tensor is empty, according to ONNX ReduceSum definition, + # set keepdims=1 so that the resulted tensor has the same rank as the input. + return g.op(op_name, self, keepdims_i=1) + return g.op(op_name, self, keepdims_i=0) + + +@_beartype.beartype +def dequantize_helper( + g: jit_utils.GraphContext, + qtensor: _C.Value, + qdtype: Optional[_C_onnx.TensorProtoDataType] = None, +) -> Tuple[_C.Value, _C.Value, _C.Value, Optional[_C.Value]]: + """Appends to graph `g` ONNX nodes that dequantizes `qtensor` into `tensor`. + + Args: + g: Graph, the ONNX IR graph that is under construction. + qtensor: torch._C.Value, either a tuple of (quantized_tensor, scale, zero_point) + for per tensor quantization, or + (quantized_tensor, scale, zero_point, axis) for per channel quantization, + representing the quantized tensor. + qdtype: torch.onnx.TensorProtoDataType default None, if not None, represents the + data type of quantized tensor. It must be either + torch.onnx.TensorProtoDataType.UINT8 or torch.onnx.TensorProtoDataType.INT8. 
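+ Returns:
+ A tuple of (dequantized tensor, scale, zero_point, axis), where axis is
+ None for per tensor quantization.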
+ """ + unpacked_qtensors = _unpack_quantized_tensor(qtensor) + tensor, scale, zero_point = unpacked_qtensors[:3] + axis = unpacked_qtensors[3] if len(unpacked_qtensors) >= 4 else None + axis_i = _get_const(axis, "i", "axis") + input_qdtype = _type_utils.JitScalarType.from_value(tensor) + if qdtype is None: + if input_qdtype is not None: + qdtype = input_qdtype.onnx_type() + else: + qdtype = _C_onnx.TensorProtoDataType.UINT8 + value = g.op("Cast", tensor, to_i=qdtype) + scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT) + zero_point = g.op("Cast", zero_point, to_i=qdtype) + + if axis_i is not None and GLOBALS.export_onnx_opset_version < 13: + _onnx_opset_unsupported_detailed( + "DequantizeLinear", + GLOBALS.export_onnx_opset_version, + 13, + "Attribute axis is not supported.", + qtensor, + ) + + return ( + g.op("DequantizeLinear", value, scale, zero_point, axis_i=axis_i), + scale, + zero_point, + axis, + ) + + +@_beartype.beartype +def quantize_helper( + g: jit_utils.GraphContext, + tensor: _C.Value, + scale: _C.Value, + zero_point: _C.Value, + axis: Optional[_C.Value] = None, +) -> _C.Value: + """Appends to graph `g` ONNX nodes that quantizes `tensor` based on `scale`, `zero_point` and `axis`. + + Args: + g: Graph, the ONNX IR graph that is under construction. + tensor: torch._C.Value, representing the tensor to be quantized. + scale: torch._C.Value, quantized scale. + zero_point: torch._C.Value, quantized zero point. + axis: Optional[torch._C.Value] default None, if None, represents per tensor quantization. + Otherwise, represents per channel quantization, along given axis. + + Returns: + A TupleConstruct storing information of the quantized tensor. + """ + if ( + axis is not None + and not _is_none(axis) + and GLOBALS.export_onnx_opset_version < 13 + ): + _onnx_opset_unsupported_detailed( + "QuantizeLinear", + GLOBALS.export_onnx_opset_version, + 13, + "Attribute axis is not supported.", + tensor, + ) + + assert scale is not None + if ( + _type_utils.JitScalarType.from_value(scale, _type_utils.JitScalarType.UNDEFINED) + != _type_utils.JitScalarType.FLOAT + ): + scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT) + + assert zero_point is not None + if _type_utils.JitScalarType.from_value( + zero_point, _type_utils.JitScalarType.UNDEFINED + ) not in { + _type_utils.JitScalarType.UINT8, + _type_utils.JitScalarType.INT8, + }: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8) + output = g.op( + "QuantizeLinear", + tensor, + scale, + zero_point, + axis_i=_get_const(axis, "i", "axis"), + ) + args = [output, scale, zero_point] + if axis is not None and not _is_none(axis): + args.append(axis) + return g.op("prim::TupleConstruct", *args) + + +@_beartype.beartype +def requantize_bias_helper( + g: jit_utils.GraphContext, bias, input_scale, weight_scale, axis=None +): + """In PyTorch, bias is float and is quantized to int32 implicitly inside the quantized ATen op kernel. + In ONNX we need to make the quantization explicit because operators expect all of their inputs to be quantized. + Since int32 is not a supported output type by ONNX operator `QuantizeLinear`, quantization is exported using + regular operators. 
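+ Returns:
+ A TupleConstruct of (int32 bias, bias scale, zero point), with axis appended
+ when per channel quantization is used. The bias scale is
+ input_scale * weight_scale and the zero point is 0.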
+ """ + bias_scale = g.op("Mul", weight_scale, input_scale) + bias_scale_shape = g.op("Shape", bias_scale) + bias_zero_point = g.op( + "ConstantOfShape", bias_scale_shape, value_t=torch.tensor([0], dtype=torch.int) + ) + q_bias = g.op( + "Cast", g.op("Div", bias, bias_scale), to_i=_C_onnx.TensorProtoDataType.INT32 + ) + axis_args = [] + if axis is not None and not _is_none(axis): + axis_args.append(axis) + return g.op("prim::TupleConstruct", q_bias, bias_scale, bias_zero_point, *axis_args) + + +@_beartype.beartype +def args_have_same_dtype(args): + assert args + base_dtype = _type_utils.JitScalarType.from_value(args[0]) + has_same_dtype = all( + _type_utils.JitScalarType.from_value(elem) == base_dtype for elem in args + ) + return has_same_dtype + + +# Deprecated. Internally use _type_utils.ScalarType +# TODO: remove these once we support Type's in the JIT IR and we can once again +# use the unified toType operator +cast_pytorch_to_onnx = { + "Byte": _C_onnx.TensorProtoDataType.UINT8, + "Char": _C_onnx.TensorProtoDataType.INT8, + "Double": _C_onnx.TensorProtoDataType.DOUBLE, + "Float": _C_onnx.TensorProtoDataType.FLOAT, + "Half": _C_onnx.TensorProtoDataType.FLOAT16, + "Int": _C_onnx.TensorProtoDataType.INT32, + "Long": _C_onnx.TensorProtoDataType.INT64, + "Short": _C_onnx.TensorProtoDataType.INT16, + "Bool": _C_onnx.TensorProtoDataType.BOOL, + "ComplexFloat": _C_onnx.TensorProtoDataType.COMPLEX64, + "ComplexDouble": _C_onnx.TensorProtoDataType.COMPLEX128, + "BFloat16": _C_onnx.TensorProtoDataType.BFLOAT16, + "Undefined": _C_onnx.TensorProtoDataType.UNDEFINED, +} + +# Deprecated. Internally use _type_utils.ScalarType +scalar_name_to_pytorch = { + "uint8_t": "Byte", + "int8_t": "Char", + "double": "Double", + "float": "Float", + "half": "Half", + "int": "Int", + "int64_t": "Long", + "int16_t": "Short", + "bool": "Bool", + "complex64": "ComplexFloat", + "complex128": "ComplexDouble", + "qint8": "QInt8", + "quint8": "QUInt8", + "qint32": "QInt32", + "bfloat16": "BFloat16", +} + + +# Deprecated. Internally use _type_utils.ScalarType +# This indicates each scalar type's corresponding +# torch type. Related source: +# https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h +scalar_type_to_pytorch_type = [ + torch.uint8, # 0 + torch.int8, # 1 + torch.short, # 2 + torch.int, # 3 + torch.int64, # 4 + torch.half, # 5 + torch.float, # 6 + torch.double, # 7 + torch.complex32, # 8 + torch.complex64, # 9 + torch.complex128, # 10 + torch.bool, # 11 + torch.qint8, # 12 + torch.quint8, # 13 + torch.qint32, # 14 + torch.bfloat16, # 15 +] + +# Deprecated. Internally use _type_utils.ScalarType +# source of truth is +# https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_dtypes.cpp +pytorch_name_to_type = { + "Byte": torch.uint8, + "Char": torch.int8, + "Double": torch.double, + "Float": torch.float, + "Half": torch.half, + "Int": torch.int, + "Long": torch.int64, + "Short": torch.short, + "Bool": torch.bool, + "ComplexFloat": torch.complex64, + "ComplexDouble": torch.complex128, + "QInt8": torch.qint8, + "QUInt8": torch.quint8, + "QInt32": torch.qint32, + "BFloat16": torch.bfloat16, +} + + +# Deprecated. 
Internally use _type_utils.ScalarType +scalar_type_to_onnx = [ + cast_pytorch_to_onnx["Byte"], # 0 + cast_pytorch_to_onnx["Char"], # 1 + cast_pytorch_to_onnx["Short"], # 2 + cast_pytorch_to_onnx["Int"], # 3 + cast_pytorch_to_onnx["Long"], # 4 + cast_pytorch_to_onnx["Half"], # 5 + cast_pytorch_to_onnx["Float"], # 6 + cast_pytorch_to_onnx["Double"], # 7 + cast_pytorch_to_onnx["Undefined"], # 8 + cast_pytorch_to_onnx["ComplexFloat"], # 9 + cast_pytorch_to_onnx["ComplexDouble"], # 10 + cast_pytorch_to_onnx["Bool"], # 11 + cast_pytorch_to_onnx["Char"], # 12 + cast_pytorch_to_onnx["Byte"], # 13 + cast_pytorch_to_onnx["Int"], # 14 + cast_pytorch_to_onnx["BFloat16"], # 15 +] + +# Global set to store the list of quantized operators in the network. +# This is currently only used in the conversion of quantized ops from PT -> C2 via ONNX. +_quantized_ops: Set[int] = set() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py new file mode 100644 index 0000000000000000000000000000000000000000..19197c0bdc788804e5c58833ed2e3949131dbe3d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py @@ -0,0 +1,1233 @@ +from __future__ import annotations + +import functools +import sys +import warnings +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch._C._onnx as _C_onnx +import torch.onnx +from torch import _C + +# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics +from torch.onnx import ( + _constants, + _type_utils, + errors, + symbolic_helper, + symbolic_opset9 as opset9, +) +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils, registration + +# EDITING THIS FILE? READ THIS FIRST! 
+# see Note [Edit Symbolic Files] in README.md + +# This file exports ONNX ops for opset 10 +# Opset 10 is supported by ONNX release 1.5.0 +# release on 04/24/19 + + +__all__ = [ + "dequantize", + "div", + "embedding_bag", + "fake_quantize_per_tensor_affine", + "flip", + "fmod", + "isfinite", + "isinf", + "nan_to_num", + "quantize_per_tensor", + "quantized_add_relu", + "quantized_add", + "quantized_cat", + "quantized_conv1d_relu", + "quantized_conv2d_relu", + "quantized_conv3d_relu", + "quantized_conv1d", + "quantized_conv2d", + "quantized_conv3d", + "quantized_conv_transpose1d", + "quantized_conv_transpose2d", + "quantized_conv_transpose3d", + "quantized_group_norm", + "quantized_hardswish", + "quantized_instance_norm", + "quantized_layer_norm", + "quantized_leaky_relu", + "quantized_linear", + "quantized_linear_relu", + "quantized_mul", + "quantized_sigmoid", + "slice", + "sort", + "topk", +] + + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=10) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +@_onnx_symbolic("aten::div") +@_beartype.beartype +def div(g: jit_utils.GraphContext, self, other, *args): + if len(args) == 0: + return opset9.true_divide(g, self, other) + else: + return _div_rounding_mode(g, self, other, *args) + + +@symbolic_helper.parse_args("v", "v", "s") +@_beartype.beartype +def _div_rounding_mode(g: jit_utils.GraphContext, self, other, rounding_mode): + if rounding_mode == "floor": + return _floor_divide(g, self, other) + else: + return opset9._div_rounding_mode(g, self, other, rounding_mode) + + +@_onnx_symbolic("aten::_floor_divide") +@_beartype.beartype +def _floor_divide(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other): + out = opset9.true_divide(g, self, other) + return g.op("Floor", out) + else: + # Integer division does trunction rounding + div = g.op("Div", self, other) + # Division is negative if: self < 0 != other < 0 + zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)) + negative = g.op("Xor", g.op("Less", self, zero), g.op("Less", other, zero)) + + # For negative numbers with self % other != 0, subtract 1 to round down instead of up + mod = g.op("Mod", self, other, fmod_i=0) + fixup_mask = g.op("And", negative, g.op("Not", g.op("Equal", mod, zero))) + + one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)) + fixup = g.op("Sub", div, one) + return g.op("Where", fixup_mask, fixup, div) + + +@_onnx_symbolic("aten::sort") +@symbolic_helper.parse_args("v", "i", "i", "none") +@_beartype.beartype +def sort(g: jit_utils.GraphContext, self, dim, decending, out=None): + return symbolic_helper._sort_helper(g, self, dim, decending=decending, out=out) + + +@_onnx_symbolic("aten::topk") +@symbolic_helper.parse_args("v", "v", "i", "i", "i", "none") +@_beartype.beartype +def topk(g: jit_utils.GraphContext, self, k, dim, largest, sorted, out=None): + return symbolic_helper._topk_helper( + g, self, k, dim, largest=largest, sorted=sorted, out=out + ) + + +def _aten_max_pool_onnx( + g: jit_utils.GraphContext, + self: _C.Value, + kernel_shape: Sequence[int], + strides: Sequence[int], + pads: Sequence[int], + dilations: Sequence[int], + ceil_mode: bool, + unbatched_rank: int, +) -> _C.Value: + self_rank = g.op("Size", g.op("Shape", self)) + if self_rank == unbatched_rank: # C,H,W -> N,C,H,W and N=1 + self = 
g.op( + "Unsqueeze", + self, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + + pool_result, _ = g.op( + "MaxPool", + self, + outputs=2, + ceil_mode_i=ceil_mode, + dilations_i=dilations, + kernel_shape_i=kernel_shape, + pads_i=pads, + strides_i=strides, + ) + + if self_rank == unbatched_rank: + pool_result = g.op( + "Squeeze", + pool_result, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + + return pool_result + + +# For MaxPool +def _adjust_attributes_of_max_pool( + expand_size: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + padding: Union[Sequence[int], int], + dilation: Union[Sequence[int], int], +) -> Tuple[Sequence[int], Sequence[int], Sequence[int], Sequence[int]]: + """Adjust attributes of avg_pool to match ONNX specification.""" + + if isinstance(dilation, int): + dilation = [dilation] * expand_size + + if isinstance(kernel_size, int): + kernel_shape = [kernel_size] * expand_size + else: + kernel_shape = kernel_size # type: ignore[assignment] + + if isinstance(padding, int): + pads = [padding] * expand_size * 2 # type: ignore[operator, assignment] + elif len(padding) == 1: + pads = padding * expand_size * 2 # type: ignore[operator, assignment] + elif len(padding) == 2: + # 2D padding + pads = padding * 2 # type: ignore[operator, assignment] + elif len(padding) == 3: + # 3D padding + pads = padding * 2 # type: ignore[operator, assignment] + else: + # When padding is already done for all dimensions, + # we don't need to double it + # eg: (1, 1, 1, 1, 1, 1) + pads = padding # type: ignore[assignment] + + if isinstance(stride, int): + strides = [stride] * expand_size + elif not stride: + strides = kernel_shape + else: + strides = stride # type: ignore[assignment] + + return (kernel_shape, strides, pads, dilation) + + +def _aten_max_pool_with_indices_onnx( + g: jit_utils.GraphContext, + self: _C.Value, + kernel_shape: Sequence[int], + strides: Sequence[int], + pads: Sequence[int], + dilations: Sequence[int], + ceil_mode: bool, + unbatched_rank: int, + n_dims_one: Sequence[int], + n_dims_zero: Sequence[int], + n_dims_axes: Sequence[int], +) -> Tuple[_C.Value, Sequence[int]]: + self_rank = g.op("Size", g.op("Shape", self)) + if self_rank == unbatched_rank: # C,H,W -> N,C,H,W and N=1 + self = g.op( + "Unsqueeze", + self, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + + pool_result, indices = g.op( + "MaxPool", + self, + outputs=2, + ceil_mode_i=ceil_mode, + dilations_i=dilations, + kernel_shape_i=kernel_shape, + pads_i=pads, + strides_i=strides, + ) + _, flatten_indices = g.op( + "MaxPool", + self, + outputs=2, + dilations_i=dilations, + kernel_shape_i=n_dims_one, + strides_i=n_dims_one, + ) + + ends = g.op("Constant", value_t=torch.tensor(n_dims_one)) + starts = g.op("Constant", value_t=torch.tensor(n_dims_zero)) + axes = g.op("Constant", value_t=torch.tensor(n_dims_axes)) + + delta = g.op("Slice", flatten_indices, starts, ends, axes) + indices = g.op("Sub", indices, delta) + + if self_rank == unbatched_rank: + pool_result = g.op( + "Squeeze", pool_result, value_t=torch.tensor([0], dtype=torch.int64) + ) + indices = g.op("Squeeze", indices, value_t=torch.tensor([0], dtype=torch.int64)) + + return (pool_result, indices) + + +@_onnx_symbolic( + "aten::max_pool1d", + decorate=[_apply_params("max_pool1d", 1, return_indices=False)], +) +@_onnx_symbolic( + "aten::max_pool2d", + decorate=[_apply_params("max_pool2d", 2, return_indices=False)], +) +@_onnx_symbolic( + "aten::max_pool3d", + 
decorate=[_apply_params("max_pool3d", 3, return_indices=False)], +) +@_onnx_symbolic( + "aten::max_pool1d_with_indices", + decorate=[ + _apply_params( + "max_pool1d_with_indices", + 1, + return_indices=True, + ) + ], +) +@_onnx_symbolic( + "aten::max_pool2d_with_indices", + decorate=[ + _apply_params( + "max_pool2d_with_indices", + 2, + return_indices=True, + ) + ], +) +@_onnx_symbolic( + "aten::max_pool3d_with_indices", + decorate=[ + _apply_params( + "max_pool3d_with_indices", + 3, + return_indices=True, + ) + ], +) +@_beartype.beartype +def _max_pool(name: str, expand_size: int, return_indices: bool): + @symbolic_helper.quantized_args(True, False, False, False, False, False) + @symbolic_helper.parse_args("v", "is", "is", "is", "is", "i") + def symbolic_fn( + g: jit_utils.GraphContext, + input: _C.Value, + kernel_size: Sequence[int], + stride: Sequence[int], + padding: Union[int, Sequence[int]], + dilation: Sequence[int], + ceil_mode: bool, + ): + kernel_shape, strides, pads, dilations = _adjust_attributes_of_max_pool( + expand_size, kernel_size, stride, padding, dilation + ) + + if return_indices: + return _aten_max_pool_with_indices_onnx( + g, + input, + kernel_shape, + strides, + pads, + dilations, + ceil_mode, + expand_size + 1, + ([1] * expand_size), + ([0] * expand_size), + ([2 + i for i in range(expand_size)]), + ) + else: + return _aten_max_pool_onnx( + g, + input, + kernel_shape, + strides, + pads, + dilations, + ceil_mode, + expand_size + 1, + ) + + return symbolic_fn + + +# For AvgPool +def _adjust_attributes_of_avg_pool( + expand_size: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + padding: Union[Sequence[int], int], +) -> Tuple[Sequence[int], Sequence[int], Sequence[int]]: + """Adjust attributes of avg_pool to match ONNX specification.""" + + if isinstance(kernel_size, int): + kernel_shape = [kernel_size] * expand_size + else: + kernel_shape = kernel_size # type: ignore[assignment] + + if isinstance(padding, int): + pads = [padding] * expand_size * 2 + elif len(padding) == 1: + pads = padding * expand_size * 2 # type: ignore[operator, assignment] + elif len(padding) == 2: + pads = padding * expand_size # type: ignore[operator, assignment] + else: + pads = padding * 2 # type: ignore[operator, assignment] + + if isinstance(stride, int): + strides = [stride] * expand_size + elif not stride: + strides = kernel_shape + else: + strides = stride # type: ignore[assignment] + + return (kernel_shape, strides, pads) + + +@_onnx_symbolic( + "aten::avg_pool1d", + decorate=[_apply_params("avg_pool1d", 1)], +) +@_onnx_symbolic( + "aten::avg_pool2d", + decorate=[_apply_params("avg_pool2d", 2)], +) +@_onnx_symbolic( + "aten::avg_pool3d", + decorate=[_apply_params("avg_pool3d", 3)], +) +@_beartype.beartype +def _avg_pool(name, expand_size): + @symbolic_helper.quantized_args(True, False, False, False, False, False, False) + @symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none") + @_beartype.beartype + def symbolic_fn( + g, + input: _C.Value, + kernel_size: Sequence[int], + stride: Sequence[int], + padding: Union[int, Sequence[int]], + ceil_mode: int, + count_include_pad: int, + divisor_override=None, + ): + kernel_shape, strides, pads = _adjust_attributes_of_avg_pool( + expand_size, kernel_size, stride, padding + ) + + result = g.op( + "AveragePool", + input, + ceil_mode_i=ceil_mode, + count_include_pad_i=count_include_pad, + kernel_shape_i=kernel_shape, + pads_i=pads, + strides_i=strides, + ) + + return result + + return symbolic_fn + + 
+@_onnx_symbolic( + "aten::upsample_nearest1d", + decorate=[_apply_params("upsample_nearest1d", 3, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest2d", + decorate=[_apply_params("upsample_nearest2d", 4, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest3d", + decorate=[_apply_params("upsample_nearest3d", 5, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_linear1d", + decorate=[_apply_params("upsample_linear1d", 3, "linear")], +) +@_onnx_symbolic( + "aten::upsample_bilinear2d", + decorate=[_apply_params("upsample_bilinear2d", 4, "linear")], +) +@_onnx_symbolic( + "aten::upsample_trilinear3d", + decorate=[_apply_params("upsample_trilinear3d", 5, "linear")], +) +@_beartype.beartype +def _interpolate(name, dim, interpolate_mode): + @symbolic_helper.quantized_args(True, False, False) + @_beartype.beartype + def symbolic_fn(g, input, output_size, *args): + scales, align_corners = symbolic_helper._get_interpolate_attributes( + g, interpolate_mode, args + ) + symbolic_helper._interpolate_warning(interpolate_mode) + align_corners = symbolic_helper._maybe_get_scalar(align_corners) + if align_corners: + return symbolic_helper._unimplemented(name, "align_corners == True", input) + if scales is None: + scales = symbolic_helper._interpolate_size_to_scales( + g, input, output_size, dim + ) + return g.op("Resize", input, scales, mode_s=interpolate_mode) + + return symbolic_fn + + +@_onnx_symbolic("aten::__interpolate") +@_beartype.beartype +def __interpolate( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, + antialias, +): + scales, mode = symbolic_helper._interpolate_get_scales_and_mode( + g, input, size, scale_factor, mode, align_corners + ) + return g.op("Resize", input, scales, mode_s=mode) + + +@_beartype.beartype +def _slice( + g: jit_utils.GraphContext, + input: torch._C.Value, + axes: Union[List, torch.Tensor, torch._C.Value], + starts: Union[List, torch.Tensor, torch._C.Value], + ends: Union[List, torch.Tensor, torch._C.Value], + steps: Optional[Union[List, torch.Tensor, torch._C.Value]] = None, +): + def is_none_value(value): + if value is None: + return True + return ( + isinstance(value, torch._C.Value) + and value.node().kind() == "prim::Constant" + and isinstance(value.type(), _C.NoneType) + ) + + def to_slice_input(list_or_value, default_value=None): + # Convert input param into a 1D torch.Value. 
+ if is_none_value(list_or_value) and default_value is not None: + list_or_value = [default_value] + + if isinstance(list_or_value, (list, torch.Tensor)): + return g.op("Constant", value_t=torch.tensor(list_or_value)) + + rank = symbolic_helper._get_tensor_rank(list_or_value) + if rank == 0: + return symbolic_helper._unsqueeze_helper(g, list_or_value, [0]) + if rank == 1: + return list_or_value + raise errors.SymbolicValueError( + f"Rank must be 0 or 1, not {rank}", list_or_value + ) + + def get_const_value(list_or_value): + if isinstance(list_or_value, (list, torch.Tensor)): + if len(list_or_value) == 1: + return list_or_value[0] + return None + return symbolic_helper._maybe_get_const(list_or_value, "i") + + # Check if slice is a no-op + if ( + get_const_value(starts) == 0 + and get_const_value(ends) == _constants.INT64_MAX + and (steps is None or get_const_value(steps) == 1) + ): + return input + + axes = to_slice_input(axes) + starts = to_slice_input(starts, default_value=0) + ends = to_slice_input(ends, default_value=_constants.INT64_MAX) + if steps is None: + return g.op("Slice", input, starts, ends, axes) + steps = to_slice_input(steps, default_value=1) + return g.op("Slice", input, starts, ends, axes, steps) + + +@_onnx_symbolic("aten::slice") +@_beartype.beartype +def slice(g: jit_utils.GraphContext, self, *args): + if len(args) == 4: + # aten::slice(Tensor self, int dim, int? start=None, int? end=None, int step=1) -> Tensor + dims, start, end, step = args + elif len(args) == 3: + # aten::slice(t[] l, int? start=None, int? end=None, int step=1) -> t[] + start, end, step = args + dims = [0] + else: + raise errors.SymbolicValueError("Unknown aten::slice signature", self) + + return symbolic_helper._slice_helper( + g, + self, + axes=dims, + starts=start, + ends=end, + steps=step, + ) + + +@_onnx_symbolic("aten::flip") +@symbolic_helper.parse_args("v", "is") +@_beartype.beartype +def flip(g: jit_utils.GraphContext, input, dims): + return symbolic_helper._slice_helper( + g, + input, + axes=dims, + starts=[-1] * len(dims), + ends=[-_constants.INT64_MAX] * len(dims), + steps=[-1] * len(dims), + ) + + +@_onnx_symbolic("aten::fmod") +@_beartype.beartype +def fmod(g: jit_utils.GraphContext, input, other): + return g.op("Mod", input, other, fmod_i=1) + + +@_onnx_symbolic("aten::embedding_bag") +@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i") +@_beartype.beartype +def embedding_bag( + g: jit_utils.GraphContext, + embedding_matrix, + indices, + offsets, + scale_grad_by_freq, + mode, + sparse, + per_sample_weights, + include_last_offset, + padding_idx, +): + if scale_grad_by_freq and GLOBALS.export_training: + return symbolic_helper._onnx_unsupported( + "embedding_bag with scale_grad_by_freq for training mode" + ) + if padding_idx is not None and padding_idx >= 0: + raise RuntimeError("embedding_bag with padding_idx") + + warnings.warn( + "Export of embedding_bag with dynamic input/offsets shape is not supported in opset 10. 
" + "Please use opset 11 or higher to export model for dynamic input shape.'" + ) + offsets_dim_0 = symbolic_helper._get_tensor_dim_size(offsets, 0) + if offsets_dim_0 is not None: + if include_last_offset: + offset_len = offsets_dim_0 - 1 + offsets_extended = offsets + else: + offset_len = offsets_dim_0 + offsets_extended = [ + offsets, + g.op("Constant", value_t=torch.tensor([sys.maxsize])), + ] + offsets_extended = g.op("Concat", *offsets_extended, axis_i=0) + list_ = [] + for i in range(offset_len): + start_ = symbolic_helper._unsqueeze_helper( + g, + opset9.select(g, offsets_extended, torch.tensor(0), torch.tensor(i)), + [0], + ) + end_ = symbolic_helper._unsqueeze_helper( + g, + opset9.select( + g, offsets_extended, torch.tensor(0), torch.tensor(i + 1) + ), + [0], + ) + axes_ = g.op("Constant", value_t=torch.tensor([0])) + indices_row = g.op("Slice", indices, start_, end_, axes_) + + embeddings = g.op("Gather", embedding_matrix, indices_row) + if not symbolic_helper._is_none(per_sample_weights): + per_sample_weights_row = g.op( + "Slice", per_sample_weights, start_, end_, axes_ + ) + per_sample_weights_row = symbolic_helper._unsqueeze_helper( + g, per_sample_weights_row, [1] + ) + embeddings = g.op("Mul", embeddings, per_sample_weights_row) + if mode == 0: + embeddings = symbolic_helper._reducesum_helper( + g, embeddings, axes_i=[0], keepdims_i=0 + ) + elif mode == 1: + embeddings = g.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0) + else: + embeddings = g.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0) + + embeddings = symbolic_helper._unsqueeze_helper(g, embeddings, [0]) + list_.append(embeddings) + + output = g.op("Concat", *list_, axis_i=0) + # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices. + # But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag. + return output, None, None, None + else: + return symbolic_helper._onnx_unsupported( + "embedding_bag with unknown shape of offsets for opset 10 is not supported. " + "please use opset 11 or higher." + ) + + +@_onnx_symbolic("aten::fake_quantize_per_tensor_affine") +@symbolic_helper.parse_args("v", "v", "v", "i", "i") +@_beartype.beartype +def fake_quantize_per_tensor_affine( + g: jit_utils.GraphContext, + inputs, + scale, + zero_point, + quant_min=-128, + quant_max=127, +): + # NOTE: (0, 127) is a special case. PyTorch restricts activations to be in the range (0, 127). + # https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422 + if (quant_min, quant_max) == (0, 127): + symbolic_helper._onnx_opset_unsupported_detailed( + "fake_quantize_per_tensor_affine", + 10, + 13, + "Quantize range (0, 127) not supported, requires opset 13 Clip", + inputs, + ) + if (quant_min, quant_max) not in [(0, 255), (-128, 127)]: + raise errors.SymbolicValueError( + f"For (quant_min, quant_max), ONNX allows only (0, 255) and (-128, 127). 
" + f"Got ({quant_min}, {quant_max})", + inputs, + ) + scale = symbolic_helper._maybe_get_scalar(scale) + if scale is None: + symbolic_helper._onnx_opset_unsupported_detailed( + "fake_quantize_per_tensor_affine", + 10, + 13, + "Non-constant scale not supported", + inputs, + ) + scale = scale.float().data # Avoid exporter generating double type + if quant_min == 0: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8) + else: + zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8) + return g.op( + "DequantizeLinear", + g.op("QuantizeLinear", inputs, scale, zero_point), + scale, + zero_point, + ) + + +@_onnx_symbolic("aten::isinf") +@_beartype.beartype +def isinf(g: jit_utils.GraphContext, input): + return g.op("IsInf", g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.DOUBLE)) + + +@_onnx_symbolic("aten::isfinite") +@_beartype.beartype +def isfinite(g: jit_utils.GraphContext, input): + inf_node = isinf(g, input) + nan_node = opset9.isnan(g, input) + return opset9.__not_(g, opset9.__or_(g, inf_node, nan_node)) + + +@_onnx_symbolic("aten::quantize_per_tensor") +@_beartype.beartype +def quantize_per_tensor(g: jit_utils.GraphContext, input, scale, zero_point, dtype): + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + # TODO(justinchuby): Extract all the cast ops into a helper function. + zero_point = g.op( + "Cast", zero_point, to_i=_type_utils.JitScalarType(dtype).onnx_type() + ) + scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT) + return symbolic_helper.quantize_helper(g, input, scale, zero_point) + + +@_onnx_symbolic("aten::dequantize") +@_beartype.beartype +def dequantize(g: jit_utils.GraphContext, input): + return symbolic_helper.dequantize_helper(g, input)[0] + + +@_onnx_symbolic("aten::nan_to_num") +@symbolic_helper.parse_args("v", "f", "f", "f") +@_beartype.beartype +def nan_to_num(g: jit_utils.GraphContext, input, nan, posinf, neginf): + # Cannot create a int type tensor with inf/nan values, so we simply + # return the original tensor + if not symbolic_helper._is_fp(input): + return input + input_dtype = _type_utils.JitScalarType.from_value(input).dtype() + if nan is None: + nan = 0.0 + nan_cond = opset9.isnan(g, input) + nan_result = g.op( + "Where", + nan_cond, + g.op("Constant", value_t=torch.tensor([nan], dtype=input_dtype)), + input, + ) + + # For None values of posinf, neginf we use the greatest/lowest finite + # value representable by input’s dtype. + finfo = torch.finfo(input_dtype) + if posinf is None: + posinf = finfo.max + posinf_cond = opset9.logical_and( + g, + isinf(g, nan_result), + opset9.gt(g, nan_result, g.op("Constant", value_t=torch.LongTensor([0]))), + ) + nan_posinf_result = g.op( + "Where", + posinf_cond, + g.op("Constant", value_t=torch.tensor([posinf], dtype=input_dtype)), + nan_result, + ) + + if neginf is None: + neginf = finfo.min + neginf_cond = opset9.logical_and( + g, + isinf(g, nan_posinf_result), + opset9.lt( + g, nan_posinf_result, g.op("Constant", value_t=torch.LongTensor([0])) + ), + ) + return g.op( + "Where", + neginf_cond, + g.op("Constant", value_t=torch.tensor([neginf], dtype=input_dtype)), + nan_posinf_result, + ) + + +# Quantized symbolics --------------------------------------------------------- +# https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter#quantized-model-export +# Support starts from opset 10 because `DequantizeLinear` and `QuantizeLinear` were +# introduced in opset version 10. 
+@_onnx_symbolic("quantized::linear") +@_beartype.beartype +def quantized_linear( + g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.linear(g, input, weight, bias) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::linear_relu") +@_beartype.beartype +def quantized_linear_relu( + g: jit_utils.GraphContext, q_input, q_weight, bias, op_scale, op_zero_point +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.linear(g, input, weight, bias) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::add") +@_beartype.beartype +def quantized_add(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + y, _, _, _ = symbolic_helper.dequantize_helper(g, y) + + output = opset9.add(g, x, y) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::add_relu") +@_beartype.beartype +def quantized_add_relu(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + y, _, _, _ = symbolic_helper.dequantize_helper(g, y) + + output = opset9.add(g, x, y) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::mul") +@_beartype.beartype +def quantized_mul(g: jit_utils.GraphContext, x, y, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + y, _, _, _ = symbolic_helper.dequantize_helper(g, y) + + output = opset9.mul(g, x, y) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::hardswish") +@_beartype.beartype +def quantized_hardswish(g: jit_utils.GraphContext, x, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.hardswish(g, x) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::sigmoid") +@_beartype.beartype +def quantized_sigmoid(g: jit_utils.GraphContext, x, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.sigmoid(g, x) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::leaky_relu") +@_beartype.beartype +def quantized_leaky_relu( + g: jit_utils.GraphContext, x, negative_slope, inplace, op_scale, op_zero_point +): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.leaky_relu(g, x, negative_slope, inplace) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::layer_norm") +@_beartype.beartype +def quantized_layer_norm( + g: jit_utils.GraphContext, + x, + normalized_shape, + weight, + bias, + eps, + op_scale, + 
op_zero_point, +): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.layer_norm(g, x, normalized_shape, weight, bias, eps, False) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::group_norm") +@_beartype.beartype +def quantized_group_norm( + g: jit_utils.GraphContext, + x, + num_groups, + weight, + bias, + eps, + op_scale, + op_zero_point, +): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = opset9.group_norm(g, x, num_groups, weight, bias, eps, False) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::instance_norm") +@symbolic_helper.parse_args("v", "v", "v", "f", "v", "v") +@_beartype.beartype +def quantized_instance_norm( + g: jit_utils.GraphContext, + q_input, + weight, + bias, + eps, + op_scale, + op_zero_point, +): + input, _, _, _ = symbolic_helper.dequantize_helper(g, q_input) + + output = opset9.instance_norm( + g, input, weight, bias, None, None, False, 0.0, eps, False + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv1d_relu") +@_beartype.beartype +def quantized_conv1d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv2d_relu") +@_beartype.beartype +def quantized_conv2d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv3d_relu") +@_beartype.beartype +def quantized_conv3d_relu( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups) + output = opset9.relu(g, output) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv1d") +@_beartype.beartype +def quantized_conv1d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + 
op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv1d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv2d") +@_beartype.beartype +def quantized_conv2d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv2d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv3d") +@_beartype.beartype +def quantized_conv3d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv3d(g, input, weight, bias, stride, padding, dilation, groups) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose1d") +@_beartype.beartype +def quantized_conv_transpose1d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose2d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose2d") +@_beartype.beartype +def quantized_conv_transpose2d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose2d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::conv_transpose3d") +@_beartype.beartype +def quantized_conv_transpose3d( + g: jit_utils.GraphContext, + q_input, + q_weight, + bias, + 
stride, + padding, + output_padding, + dilation, + groups, + op_scale, + op_zero_point, +): + input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input) + weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight) + q_bias = symbolic_helper.requantize_bias_helper(g, bias, input_scale, weight_scale) + bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias) + + output = opset9.conv_transpose3d( + g, input, weight, bias, stride, padding, output_padding, groups, dilation + ) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +@_onnx_symbolic("quantized::cat") +@symbolic_helper.parse_args("v", "i", "v", "v") +@_beartype.beartype +def quantized_cat( + g: jit_utils.GraphContext, + q_inputs: _C.Value, + dim: int, + op_scale: _C.Value, + op_zero_point: _C.Value, +) -> _C.Value: + unpacked_inputs = symbolic_helper._unpack_list(q_inputs) + dequantized = [ + symbolic_helper.dequantize_helper(g, input)[0] for input in unpacked_inputs + ] + concatenated = g.op("Concat", *dequantized, axis_i=dim) + return symbolic_helper.quantize_helper(g, concatenated, op_scale, op_zero_point) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py new file mode 100644 index 0000000000000000000000000000000000000000..f3b2f6722caf4bb7e24a16141adcb5560acb7a06 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.py @@ -0,0 +1,288 @@ +"""This file exports ONNX ops for opset 14. + +Note [ONNX operators that are added/updated in opset 14] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +New operators: + HardSwish, Trilu + +Updated operators: + Reshape + Add, Sub, Mul, Div + GRU, LSTM, RNN + BatchNorm, Cumsum, Relu +""" + +# EDITING THIS FILE? READ THIS FIRST! +# see Note [Edit Symbolic Files] in README.md +from __future__ import annotations + +import functools +from typing import Optional + +import torch +from torch.onnx import _constants, _type_utils, symbolic_helper +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, jit_utils, registration + +__all__ = [ + "hardswish", + "tril", + "triu", + "reshape", + "batch_norm", + "quantized_hardswish", + "scaled_dot_product_attention", +] + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=14) + + +@_onnx_symbolic("aten::hardswish") +@symbolic_helper.parse_args("v") +@_beartype.beartype +def hardswish(g: jit_utils.GraphContext, self): + return g.op("HardSwish", self) + + +@_onnx_symbolic("aten::tril") +@_beartype.beartype +def tril(g: jit_utils.GraphContext, self, diagonal, out=None): + return g.op("Trilu", self, diagonal, upper_i=0) + + +@_onnx_symbolic("aten::triu") +@_beartype.beartype +def triu(g: jit_utils.GraphContext, self, diagonal, out=None): + return g.op("Trilu", self, diagonal, upper_i=1) + + +@_onnx_symbolic("aten::reshape") +@symbolic_helper.quantized_args(True) +@symbolic_helper.parse_args("v", "v") +@_beartype.beartype +def reshape(g: jit_utils.GraphContext, self, shape): + # NOTE: Due to bug in ORT https://github.com/microsoft/onnxruntime/issues/10664 + # Reshape export cannot utilize the new allowzero attribute introduced in opset 14. 
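+    # With allowzero=0 (the pre-opset-14 behavior), a 0 in `shape` copies the
+    # corresponding dimension from the input instead of producing a zero-sized axis.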
+ return symbolic_helper._reshape_helper(g, self, shape, allowzero=0) + + +@_onnx_symbolic("aten::batch_norm") +@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i") +@_beartype.beartype +def batch_norm( + g: jit_utils.GraphContext, + input, + weight, + bias, + running_mean, + running_var, + training, + momentum, + eps, + cudnn_enabled, +): + if ( + torch.is_autocast_enabled() + and not symbolic_helper.args_have_same_dtype( + [input, weight, bias, running_mean, running_var] + ) + and GLOBALS.export_onnx_opset_version < 15 + ): + return symbolic_helper._onnx_opset_unsupported_detailed( + "BatchNormalization", + 14, + 15, + "All input tensors must have the same `dtype`." + " Turn off Autocast or export using opset version 15.", + input, + ) + + symbolic_helper.check_training_mode(training, "batch_norm") + weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper( + g, input, weight, bias, running_mean, running_var + ) + out = g.op( + "BatchNormalization", + input, + weight, + bias, + running_mean, + running_var, + epsilon_f=eps, + momentum_f=1 - momentum, + training_mode_i=0 if not training else 1, + outputs=1 if not training else 3, + ) + if not training: + return out + else: + res, new_running_mean, new_running_var = out + new_running_mean.setType(running_mean.type()) + new_running_var.setType(running_var.type()) + return res + + +@_onnx_symbolic("quantized::hardswish") +@_beartype.beartype +def quantized_hardswish(g: jit_utils.GraphContext, x, op_scale, op_zero_point): + x, _, _, _ = symbolic_helper.dequantize_helper(g, x) + + output = hardswish(g, x) + + return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point) + + +# Ported from +# https://github.com/microsoft/onnxscript/blob/6b1b81700b4523f31d8c6d3321e5d8ef5d42b764/onnxscript/function_libs/torch_aten/ops/nn.py#L1504 +# aten_scaled_dot_product_attention +# NOTE: Need op.Trilu +@_onnx_symbolic("aten::scaled_dot_product_attention") +@symbolic_helper.parse_args("v", "v", "v", "v", "f", "b", "v") +@_beartype.beartype +def scaled_dot_product_attention( + g: jit_utils.GraphContext, + query: torch._C.Value, + key: torch._C.Value, + value: torch._C.Value, + attn_mask: Optional[torch._C.Value] = None, + dropout_p: float = 0.0, + is_causal: bool = False, + scale: Optional[torch._C.Value] = None, +): + assert (not is_causal) or ( + is_causal and symbolic_helper._is_none(attn_mask) + ), "is_causal and attn_mask cannot be set at the same time" + + scale = symbolic_helper._maybe_get_const(scale, "f") + if symbolic_helper._is_none(scale): + scale = _attention_scale(g, query) + + if is_causal: + attn_mask = _causal_attention_mask(g, query, key) + + # Swap the last two axes of key + # NOTE: onnx-script has different logic here, because the attribute perms in + # transpose needs list of ints + key_shape_builtin = symbolic_helper._get_tensor_rank(key) + key_transposed_axes = list(range(key_shape_builtin)) + key_transposed_axes[-1], key_transposed_axes[-2] = ( + key_transposed_axes[-2], + key_transposed_axes[-1], + ) + key_transposed = g.op("Transpose", key, perm_i=key_transposed_axes) + + # https://github.com/pytorch/pytorch/blob/12da0c70378b5be9135c6fda62a9863bce4a4818/aten/src/ATen/native/transformers/attention.cpp#L653 + # Scale q, k before matmul for stability see https://tinyurl.com/sudb9s96 for math + query_scaled = g.op("Mul", query, g.op("Sqrt", scale)) + key_transposed_scaled = g.op("Mul", key_transposed, g.op("Sqrt", scale)) + mul_qk = g.op("MatMul", query_scaled, key_transposed_scaled) + + 
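+    # The mask handling below mirrors torch semantics: a boolean mask is converted into an
+    # additive float mask (0 where attention is allowed, -inf where it is masked) before
+    # being added to the scaled QK^T product; float/half masks are added directly.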
if symbolic_helper._is_none(attn_mask): + mul_qk_add = mul_qk + elif ( + _type_utils.JitScalarType.from_value(attn_mask) + == _type_utils.JitScalarType.BOOL + ): + # Turn the Boolean mask to float: attn_mask.masked_fill(not attn_mask, -float('inf')) + const_zero = g.op("Constant", value_t=torch.tensor([0.0])) + const_neg_inf = g.op("Constant", value_t=torch.tensor([-float("inf")])) + attn_mask = g.op("Where", attn_mask, const_zero, const_neg_inf) + mul_qk_add = g.op("Add", mul_qk, attn_mask) + elif _type_utils.JitScalarType.from_value(attn_mask) in ( + _type_utils.JitScalarType.FLOAT, + _type_utils.JitScalarType.HALF, + ): + mul_qk_add = g.op("Add", mul_qk, attn_mask) + else: + raise ValueError( + f"Unsupported type for attn_mask: {_type_utils.JitScalarType.from_value(attn_mask)}" + ) + + attn_weight = g.op("Softmax", mul_qk_add, axis_i=-1) + + if dropout_p != 0: + attn_weight = g.op( + "Dropout", + attn_weight, + g.op("Constant", value_t=torch.tensor(dropout_p, dtype=torch.float)), + ) + + return g.op("MatMul", attn_weight, value) + + +@_beartype.beartype +def _attention_scale( + g: jit_utils.GraphContext, query: torch._C.Value +) -> torch._C.Value: + """Calculate the scale factor for the attention result. + + Args: + query: Tensor of shape [..., L, E] + + Returns: + Scalar scale factor := 1 / math.sqrt(query.size(-1)) + """ + query_shape = g.op("Shape", query) + query_shape_last = g.op( + "Slice", + query_shape, + g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)), + g.op( + "Constant", value_t=torch.tensor([_constants.INT64_MAX], dtype=torch.int64) + ), + ) + embedding_size = g.op( + "Cast", + query_shape_last, + to_i=_type_utils.JitScalarType.from_value(query).onnx_type(), + ) + const_one = g.op("Constant", value_t=torch.tensor([1.0], dtype=torch.float)) + scale = g.op("Div", const_one, g.op("Sqrt", embedding_size)) + # Add a Cast to convert the scale back to original type + scale = g.op( + "Cast", + scale, + to_i=_type_utils.JitScalarType.from_value(query).onnx_type(), + ) + return scale + + +@_beartype.beartype +def _causal_attention_mask( + g: jit_utils.GraphContext, query: torch._C.Value, key: torch._C.Value +) -> torch._C.Value: + """Create a causal mask for the given query and key tensors. + + Equivalent to:: + mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0) + attn_mask = torch.zeros(L, S, dtype=torch.float) + attn_mask = attn_mask.masked_fill(not mask, -float('inf')) + + Args: + query: Tensor of shape [..., L, E] + key: Tensor of shape [..., S, E] + + Returns: + Tensor of shape [L, S] + """ + + query_shape = g.op("Shape", query) + key_shape = g.op("Shape", key) + + last_idx = g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)) + second_last_idx = g.op("Constant", value_t=torch.tensor([-2], dtype=torch.int64)) + target_length = g.op("Slice", query_shape, second_last_idx, last_idx) + source_length = g.op("Slice", key_shape, second_last_idx, last_idx) + # attn_mask = torch.ones(L, S) := { + size = g.op("Concat", target_length, source_length, axis_i=0) + const_one = g.op("Constant", value_t=torch.tensor([1.0])) + attn_mask = g.op("Expand", const_one, size) + # } + attn_mask = g.op("Trilu", attn_mask, upper_i=0) + # The causal mask has 0s in the lower triangle and -inf in the upper triangle. 
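+    # After Trilu, `attn_mask` holds 1s on and below the diagonal and 0s above it; the
+    # Where below maps those 0s to -inf and the 1s to 0, producing the additive mask.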
+ const_zero = g.op("Constant", value_t=torch.tensor([0.0])) + const_neg_inf = g.op("Constant", value_t=torch.tensor([-float("inf")])) + attn_mask = g.op( + "Where", g.op("Equal", attn_mask, const_zero), const_neg_inf, const_zero + ) + return attn_mask diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset15.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset15.py new file mode 100644 index 0000000000000000000000000000000000000000..4f316a77f62e65c2a5ef55e59f85bffa982a0564 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset15.py @@ -0,0 +1,82 @@ +"""This file exports ONNX ops for opset 15. + +Note [ONNX operators that are added/updated in opset 15] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +https://github.com/onnx/onnx/blob/master/docs/Changelog.md#version-15-of-the-default-onnx-operator-set +New operators: + Bernoulli + CastLike + Optional + OptionalGetElement + OptionalHasElement + +Updated operators: + BatchNormalization https://github.com/onnx/onnx/pull/3545 + Backwards compatible + TODO: test coverage for mixed types inputs. + Pow https://github.com/onnx/onnx/pull/3412 + Backwards compatible + TODO: bfloat16 support. + Shape https://github.com/onnx/onnx/pull/3580 + Backwards compatible + TODO: optional start/end attribute. +""" + +# EDITING THIS FILE? READ THIS FIRST! +# see Note [Edit Symbolic Files] in README.md + +import functools + +import torch +from torch import _C +from torch.onnx import symbolic_helper, symbolic_opset9 as opset9 +from torch.onnx._internal import _beartype, jit_utils, registration + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=15) + + +@_onnx_symbolic("aten::__is_") +@_beartype.beartype +def aten__is_(g: jit_utils.GraphContext, self, other): + if symbolic_helper._is_none(other): + if isinstance(self.type(), _C.OptionalType): + none = g.op("OptionalHasElement", self) + return g.op("Not", none) + else: + return g.op("Constant", value_t=torch.BoolTensor([0])) + return opset9.eq(g, self, other) + + +@_onnx_symbolic("aten::__isnot_") +@opset9.wrap_logical_op_with_negation # type: ignore[has-type] +@_beartype.beartype +def aten__isnot_(g: jit_utils.GraphContext, self, other): + return aten__is_(g, self, other) + + +@_onnx_symbolic("aten::bernoulli") +@_beartype.beartype +def bernoulli(g: jit_utils.GraphContext, input, p=None, generator=None, out=None): + if out is not None and not symbolic_helper._is_none(out): + symbolic_helper._unimplemented( + "Bernoulli", "out parameter is not supported for bernoulli", input + ) + if generator is not None and not symbolic_helper._is_none(generator): + symbolic_helper._unimplemented( + "Bernoulli", "generator is not supported for bernoulli", input + ) + if p is None or symbolic_helper._is_none(p): + return g.op("Bernoulli", input) + return opset9.bernoulli(g, input, p, generator, out) + + +@_onnx_symbolic("prim::unchecked_cast") +@_beartype.beartype +def prim_unchecked_cast(g: jit_utils.GraphContext, self): + # exists to refine the type of the Value + # if x is Optional[Tensor], unchecked_cast will cast + # x to Tensor, so the rest of the graph knows that x is a Tensor. 
+ if isinstance(self.type(), _C.OptionalType): + return g.op("OptionalGetElement", self) + + return self diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset17.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset17.py new file mode 100644 index 0000000000000000000000000000000000000000..3aad249a1126556f3cb0834991c82ea19650ac31 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset17.py @@ -0,0 +1,211 @@ +"""This file exports ONNX ops for opset 17. + +Note [ONNX Operators that are added/updated in opset 17] + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-17-of-the-default-onnx-operator-set +New operators: + BlackmanWindow + DFT + HammingWindow + HannWindow + LayerNormalization + MelWeightMatrix + STFT + SequenceMap +""" + +import functools +from typing import Optional, Sequence + +import torch +from torch import _C +from torch.onnx import _type_utils, errors, symbolic_helper +from torch.onnx._internal import _beartype, jit_utils, registration + +# EDITING THIS FILE? READ THIS FIRST! +# see Note [Edit Symbolic Files] in README.md + +__all__ = ["layer_norm", "stft"] + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=17) + + +@_onnx_symbolic("aten::layer_norm") +@symbolic_helper.parse_args("v", "is", "v", "v", "f", "none") +def layer_norm( + g: jit_utils.GraphContext, + input: _C.Value, + normalized_shape: Sequence[int], + weight: _C.Value, + bias: _C.Value, + eps: float, + cudnn_enable: bool, +): + # normalized_shape: input shape from an expected input of size + # axis: The first normalization dimension. + # layer_norm normalizes on the last D dimensions, + # where D is the size of normalized_shape + axis = -len(normalized_shape) + scalar_type = _type_utils.JitScalarType.from_value( + input, _type_utils.JitScalarType.FLOAT + ) + dtype = scalar_type.dtype() + if symbolic_helper._is_none(weight): + weight_value = torch.ones(normalized_shape, dtype=dtype) + weight = g.op("Constant", value_t=weight_value) + if symbolic_helper._is_none(bias): + bias_value = torch.zeros(normalized_shape, dtype=dtype) + bias = g.op("Constant", value_t=bias_value) + return g.op( + "LayerNormalization", + input, + weight, + bias, + epsilon_f=eps, + axis_i=axis, + ) + + +def _compute_edge_sizes(n_fft, window_size): + """Helper function to compute the sizes of the edges (left and right) + of a given window centered within an FFT size.""" + left = (n_fft - window_size) // 2 + right = n_fft - left - window_size + return left, right + + +@_onnx_symbolic("aten::stft") +@symbolic_helper.parse_args("v", "i", "i", "i", "v", "b", "b", "b") +@_beartype.beartype +def stft( + g: jit_utils.GraphContext, + input: _C.Value, + n_fft: int, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: Optional[_C.Value] = None, + normalized: bool = False, + onesided: Optional[bool] = True, + return_complex: Optional[bool] = False, +) -> _C.Value: + """Associates `torch.stft` with the `STFT` ONNX operator. + Note that torch.stft calls _VF.stft, without centering or padding options. + Hence, this function does not contain these two arguments. + See torch.stft source code for more info. + + Args: + g: Graph to write the ONNX representation into + input: Input tensor for the transformation + n_fft: FFT size + hop_length: Size of the hop. Defaults to `floot(n_fft // 4)` + win_length: Size of the analysis window. 
Defaults to `n_fft` + window: Analysis window. Defaults to a window of all ones + normalized: Whether to return a normalized STFT + onesided: Whether to return only half (+1) of the results, given the + symmetry of the STFT + return_complex: Whether to return the complex value (Note: Must be + `False` or `None`) + + Returns: + op: Operator for torch.stft associated with STFT (ONNX) + """ + # Checks + if return_complex: + raise errors.SymbolicValueError( + msg="STFT does not currently support complex types", value=input + ) + + # Get STFT sizes + frame_step_value = hop_length if hop_length is not None else n_fft // 4 + frame_step_const = g.op( + "Constant", value_t=torch.tensor(frame_step_value, dtype=torch.int64) + ) + frame_length_const = g.op( + "Constant", value_t=torch.tensor(n_fft, dtype=torch.int64) + ) + + # Pre-process input if needed + signal = input + signal_rank = symbolic_helper._get_tensor_rank(signal) + if signal_rank == 1: + # Add batch dimension + signal = g.op( + "Unsqueeze", + signal, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + elif signal_rank > 2: + raise errors.SymbolicValueError( + msg="STFT can only take inputs of 1 [signal] or 2 [batch, signal] dimensions. " + f"Current rank of signal is {signal_rank}, please reduce it.", + value=input, + ) + + # Get window and make sure it's the same size as `win_length` or `n_fft` + n_win = symbolic_helper._get_tensor_dim_size(window, dim=0) + if n_win is not None: + win_length_default = win_length if win_length else n_fft + assert n_win == win_length_default, ( + "Analysis window size must equal `win_length` or `n_fft`. " + f"Please, set `win_length` or `n_fft` to match `window` size ({n_win})", + ) + + # Center window around zeros if needed (required by ONNX's STFT) + if n_win < n_fft: + left, right = _compute_edge_sizes(n_fft, n_win) + left_win = g.op("Constant", value_t=torch.zeros(left)) + right_win = g.op("Constant", value_t=torch.zeros(right)) + window = g.op("Concat", left_win, window, right_win, axis_i=0) + + # Create window, if needed + if symbolic_helper._is_none(window): + if win_length: + if win_length > n_fft: + raise errors.SymbolicValueError( + msg="The analysis window can't be longer than the size of the FFT. 
" + f"Please set `win_length` ({win_length}) to `n_fft` ({n_fft}) or less.", + value=input, + ) + + # Center window, if needed + left, right = _compute_edge_sizes(n_fft, win_length) + torch_window = torch.hstack( + (torch.zeros(left), torch.ones(win_length), torch.zeros(right)) + ) + else: + # Rectangle window + torch_window = torch.ones(n_fft) + assert torch_window.shape[0] == n_fft + window = g.op("Constant", value_t=torch_window) + window = g.op( + "Cast", window, to_i=_type_utils.JitScalarType.from_value(signal).onnx_type() + ) + + # Run STFT + result = g.op( + "STFT", + signal, + frame_step_const, + window, + frame_length_const, + onesided_i=1 if onesided is None or onesided else 0, + ) + + # Transpose to mimic torch.stft's behavior + result = g.op("Transpose", result, perm_i=[0, 2, 1, 3]) + + # Remove batch dimension, if needed + if signal_rank == 1: + result = g.op( + "Squeeze", + result, + g.op("Constant", value_t=torch.tensor([0], dtype=torch.int64)), + ) + + # Normalize, if needed + if normalized: + sqrt_nfft = torch.sqrt(torch.tensor(n_fft, dtype=signal.type().dtype())) + result = g.op("Div", result, g.op("Constant", value_t=sqrt_nfft)) + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset18.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset18.py new file mode 100644 index 0000000000000000000000000000000000000000..dee33785d0b2946b013b5bd66527c16a0963b5b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset18.py @@ -0,0 +1,70 @@ +"""This file exports ONNX ops for opset 18. + +Note [ONNX Operators that are added/updated in opset 18] + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-18-of-the-default-onnx-operator-set +New operators: + CenterCropPad + Col2Im + Mish + OptionalGetElement + OptionalHasElement + Pad + Resize + ScatterElements + ScatterND +""" + +import functools +from typing import Sequence + +from torch import _C +from torch.onnx import symbolic_helper +from torch.onnx._internal import _beartype, registration + +# EDITING THIS FILE? READ THIS FIRST! 
+# see Note [Edit Symbolic Files] in symbolic_helper.py + +__all__ = ["col2im"] + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=18) + + +@_onnx_symbolic("aten::col2im") +@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is") +@_beartype.beartype +def col2im( + g, + input: _C.Value, + output_size: _C.Value, + kernel_size: _C.Value, + dilation: Sequence[int], + padding: Sequence[int], + stride: Sequence[int], +): + # convert [i0, i1, ..., in] into [i0, i0, i1, i1, ..., in, in] + adjusted_padding = [] + for pad in padding: + for _ in range(2): + adjusted_padding.append(pad) + + num_dimensional_axis = symbolic_helper._get_tensor_sizes(output_size)[0] + if not adjusted_padding: + adjusted_padding = [0, 0] * num_dimensional_axis + + if not dilation: + dilation = [1] * num_dimensional_axis + + if not stride: + stride = [1] * num_dimensional_axis + + return g.op( + "Col2Im", + input, + output_size, + kernel_size, + dilations_i=dilation, + pads_i=adjusted_padding, + strides_i=stride, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset7.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset7.py new file mode 100644 index 0000000000000000000000000000000000000000..0537e8a92888857b68eaca9d7a90c02c2261cb8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset7.py @@ -0,0 +1,66 @@ +""" +Note [ONNX operators that are added/updated from opset 7 to opset 8] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +New operators: + Expand + +Updated operators: + Min, Max, Sum, Mean: supports multidirectional broadcasting. + MaxPool: added optional indices output. + Scan +""" + +import functools +import warnings + +from torch.onnx import symbolic_helper, symbolic_opset9 as opset9 +from torch.onnx._internal import jit_utils, registration + + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=7) + +block_listed_operators = ( + "scan", + "expand", + "expand_as", + "meshgrid", + "adaptive_max_pool1d", + "adaptive_max_pool2d", + "adaptive_max_pool3d", + "max_pool1d_with_indices", + "max_pool2d_with_indices", + "max_pool3d_with_indices", +) + + +# NOTE: max, min, sum, mean: broadcasting is not supported in opset 7. +# torch.max (same for torch.min) actually has two interfaces smashed together: +# torch.max(x, dim, keepdim) and torch.max(x, y) +@_onnx_symbolic("aten::max") +def max(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None): + # torch.max(input, other) + if keepdim is None and dim_or_y is not None: + warnings.warn( + "Multidirectional broadcasting is not supported in opset 7. " + "This might cause the onnx model to be incorrect, if inputs to max operators " + "have different shapes" + ) + return opset9.max(g, self, dim_or_y, keepdim) + + +@_onnx_symbolic("aten::min") +def min(g: jit_utils.GraphContext, self, dim_or_y=None, keepdim=None): + # torch.min(input, other) + if keepdim is None and dim_or_y is not None: + warnings.warn( + "Multidirectional broadcasting is not supported in opset 7. 
" + "This might cause the onnx model to be incorrect, if inputs to min operators " + "have different shapes" + ) + return opset9.min(g, self, dim_or_y, keepdim) + + +for block_listed_op in block_listed_operators: + _onnx_symbolic(f"aten::{block_listed_op}")( + symbolic_helper._block_list_in_opset(block_listed_op) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py new file mode 100644 index 0000000000000000000000000000000000000000..c7a771c8f894f4394a68da4f42bd6bc26b245f15 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/symbolic_opset8.py @@ -0,0 +1,470 @@ +""" +Note [ONNX operators that are added/updated from opset 8 to opset 9] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +New operators: + Compress + ConstantOfShape + EyeLike + MaxUnpool + OneHot + Sinh + Cosh + Asinh + Acosh + Atanh + Shrink + IsNaN + Sign + Erf + Scatter + Where + NonZero + TfIdfVectorizer + MeanVarianceNormalization + +Updated operators: + BatchNormalization: removed spatial attribute. + Greater, Less, Constant, MatMul, PRelu, Gemm, Flatten: more data types{integers} supported. + Cast: more data types{string} supported. + Upsample: moved scales from attribute to input. + Scan +""" + +import functools +import warnings + +import torch +from torch._C import _onnx as _C_onnx +from torch.onnx import _type_utils, errors, symbolic_helper, symbolic_opset9 as opset9 +from torch.onnx._internal import jit_utils, registration + +_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=8) + +block_listed_operators = ( + "nonzero", + "where", + "scatter", + "scatter_add", + "erf", + "sign", + "isnan", + "gather", + "arange", + "masked_fill", + "index_fill", + "index_copy", + "repeat_interleave", + "any", + "all", +) + +for block_listed_op in block_listed_operators: + _onnx_symbolic(f"aten::{block_listed_op}")( + symbolic_helper._block_list_in_opset(block_listed_op) + ) + + +def _apply_params(*args, **kwargs): + """Returns a decorator that calls the decorated (higher-order) function with the given parameters.""" + + def _apply(fn): + return fn(*args, **kwargs) + + return _apply + + +@_onnx_symbolic( + "aten::upsample_nearest1d", + decorate=[_apply_params("upsample_nearest1d", 3, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest2d", + decorate=[_apply_params("upsample_nearest2d", 4, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_nearest3d", + decorate=[_apply_params("upsample_nearest3d", 5, "nearest")], +) +@_onnx_symbolic( + "aten::upsample_linear1d", + decorate=[_apply_params("upsample_linear1d", 3, "linear")], +) +@_onnx_symbolic( + "aten::upsample_bilinear2d", + decorate=[_apply_params("upsample_bilinear2d", 4, "linear")], +) +@_onnx_symbolic( + "aten::upsample_trilinear3d", + decorate=[_apply_params("upsample_trilinear3d", 5, "linear")], +) +def _interpolate(name, dim, interpolate_mode): + def symbolic_fn(g, input, output_size, *args): + scales, align_corners = symbolic_helper._get_interpolate_attributes( + g, interpolate_mode, args + ) + symbolic_helper._interpolate_warning(interpolate_mode) + align_corners = symbolic_helper._maybe_get_scalar(align_corners) + if align_corners: + return symbolic_helper._unimplemented(name, "align_corners == True", input) + output_size = symbolic_helper._maybe_get_const(output_size, "is") + if symbolic_helper._is_value(output_size): + return symbolic_helper._unimplemented( + name, "torch._C.Value (output_size) 
indexing" + ) + if scales is None: + scales = [ + 1.0 + if i < 2 + else float(output_size[-(dim - i)]) + / float(input.type().sizes()[-(dim - i)]) + for i in range(0, dim) + ] + return g.op("Upsample", input, mode_s=interpolate_mode, scales_f=scales) + + return symbolic_fn + + +@_onnx_symbolic("aten::__interpolate") +def __interpolate( + g: jit_utils.GraphContext, + input, + size, + scale_factor, + mode, + align_corners, + recompute_scale_factor, + antialias, +): + align_corners = symbolic_helper._maybe_get_const(align_corners, "b") + if not symbolic_helper._is_none(align_corners) and align_corners: + return symbolic_helper._unimplemented("interpolate", "align_corners == True") + + if not symbolic_helper._is_none(scale_factor) and symbolic_helper._is_value( + scale_factor + ): + return symbolic_helper._unimplemented( + "interpolate", "dynamic scales in opset 8" + ) + + if not symbolic_helper._is_none(size) and symbolic_helper._is_value(size): + return symbolic_helper._unimplemented("interpolate", "dynamic size in opset 8") + + scales, mode = symbolic_helper._interpolate_get_scales_and_mode( + g, input, size, scale_factor, mode, align_corners + ) + return g.op("Upsample", input, mode_s=mode, scales_f=scales) + + +# NOTE: We should create a wrapper for this kind of operation, after resolving the shape/type propagation +# issue for "cast" operators. Some symbolic functions depend on shape information of input tensor, which +# is lost after casting. +def _try_cast_integer_to_float(g: jit_utils.GraphContext, *args): + floating_scalar_types = { + _type_utils.JitScalarType.HALF, + _type_utils.JitScalarType.FLOAT, + _type_utils.JitScalarType.DOUBLE, + } + old_type = None + # Cast the input tensor to Float if its scalarType is known and is not floating number. + # If casting is performed, return the old scalarType, otherwise return None. + arg0_type = _type_utils.JitScalarType.from_value( + args[0], _type_utils.JitScalarType.UNDEFINED + ) + if arg0_type != _type_utils.JitScalarType.UNDEFINED: + old_type = arg0_type + if old_type not in floating_scalar_types: + old_type = old_type.scalar_name() + args = tuple( + g.op("Cast", arg, to_i=_C_onnx.TensorProtoDataType.FLOAT) + for arg in args + ) + else: + return (None,) + args + else: + warnings.warn( + "Only floating datatype is supported for these operators: " + "{Greater, Less, MatMul, PRelu, Gemm, Flatten}. This might cause " + "the onnx model to be incorrect, if inputs have integer datatypes." + ) + return (old_type,) + args + + +def _cast_to_type(g: jit_utils.GraphContext, input, to_type): + if to_type is None: + return input + return getattr(opset9, f"_cast_{to_type}")(g, input, False) + + +def _comparison_operator(g: jit_utils.GraphContext, input, other, op_name): + other = symbolic_helper._maybe_get_scalar(other) + other = symbolic_helper._if_scalar_type_as(other, input) + _, input, other = _try_cast_integer_to_float(g, input, other) + return g.op(op_name, input, other) + + +# NOTE: For symbolics {gt, lt, bmm, matmul, prelu, mm, addmm, view, flatten}, +# integer input type not supported in opset8. Cast to float if possible. 
+@_onnx_symbolic("aten::gt") +def gt(g: jit_utils.GraphContext, input, other): + return _comparison_operator(g, input, other, "Greater") + + +@_onnx_symbolic("aten::lt") +def lt(g: jit_utils.GraphContext, input, other): + return _comparison_operator(g, input, other, "Less") + + +@_onnx_symbolic("aten::bmm") +def bmm(g: jit_utils.GraphContext, self, other): + if symbolic_helper._try_get_scalar_type(self): + old_type, self, other = _try_cast_integer_to_float(g, self, other) + return _cast_to_type(g, g.op("MatMul", self, other), old_type) + else: + return g.op("MatMul", self, other) + + +@_onnx_symbolic("aten::matmul") +def matmul(g: jit_utils.GraphContext, self, other): + return bmm(g, self, other) + + +@_onnx_symbolic("aten::prelu") +def prelu(g: jit_utils.GraphContext, self, weight): + self_rank = symbolic_helper._get_tensor_rank(self) + weight_sizes = symbolic_helper._get_tensor_sizes(weight) + if self_rank is not None and self_rank > 2: + weight = g.op("Unsqueeze", weight, axes_i=list(range(1, self_rank - 1))) + elif self_rank == 0 and weight_sizes == [1]: + # self and weight are both scalar but weight has rank == 1, squeeze weight. + weight = symbolic_helper._squeeze_helper(g, weight, [0]) + if symbolic_helper._try_get_scalar_type(self): + old_type, self, weight = _try_cast_integer_to_float(g, self, weight) + return _cast_to_type(g, g.op("PRelu", self, weight), old_type) + else: + return g.op("PRelu", self, weight) + + +@_onnx_symbolic("aten::mm") +def mm(g: jit_utils.GraphContext, self, other): + # Create a dummy C tensor. Only needed for API purposes, the value is + # since beta = 0 + scalar_type = symbolic_helper._try_get_scalar_type(self, other) + if scalar_type is None: + raise errors.SymbolicValueError( + "mm can only operate on tensors with known types", self + ) + zero_constant = g.op( + "Constant", + value_t=torch.tensor([0], dtype=scalar_type.dtype()), + ) + + if symbolic_helper._try_get_scalar_type(self): + old_type, self, other, zero_constant = _try_cast_integer_to_float( + g, self, other, zero_constant + ) + return _cast_to_type( + g, + g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0), + old_type, + ) + return g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0) + + +@_onnx_symbolic("aten::addmm") +@symbolic_helper.parse_args("v", "v", "v", "t", "t") +def addmm(g: jit_utils.GraphContext, self, mat1, mat2, beta, alpha): + if symbolic_helper._try_get_scalar_type(self): + old_type, self, mat1, mat2 = _try_cast_integer_to_float(g, self, mat1, mat2) + return _cast_to_type( + g, + g.op( + "Gemm", + mat1, + mat2, + self, + beta_f=symbolic_helper._scalar(beta), + alpha_f=symbolic_helper._scalar(alpha), + ), + old_type, + ) + else: + return g.op( + "Gemm", + mat1, + mat2, + self, + beta_f=symbolic_helper._scalar(beta), + alpha_f=symbolic_helper._scalar(alpha), + ) + + +@_onnx_symbolic("aten::flatten") +def flatten(g: jit_utils.GraphContext, input, start_dim, end_dim): + start_dim_i = symbolic_helper._get_const(start_dim, "i", "start_dim") + end_dim_i = symbolic_helper._get_const(end_dim, "i", "end_dim") + + dim = input.type().dim() + if end_dim_i < 0: + end_dim_i = dim + end_dim_i + # use ONNX's Flatten operator for cases where the output shape is 2D + if start_dim_i == 1 and end_dim_i == dim - 1: + if symbolic_helper._try_get_scalar_type(input): + old_type, input = _try_cast_integer_to_float(g, input) + return _cast_to_type( + g, g.op("Flatten", input, axis_i=start_dim_i), old_type + ) + else: + return g.op("Flatten", input, axis_i=start_dim_i) + if 
start_dim_i == 0 and end_dim_i == dim - 2: + if symbolic_helper._try_get_scalar_type(input): + old_type, input = _try_cast_integer_to_float(g, input) + return _cast_to_type( + g, g.op("Flatten", input, axis_i=end_dim_i + 1), old_type + ) + else: + return g.op("Flatten", input, axis_i=end_dim_i + 1) + + return opset9.flatten(g, input, start_dim, end_dim) + + +def _constant_fill(g: jit_utils.GraphContext, sizes, dtype: int, const_value): + if dtype is None: + scalar_type = _type_utils.JitScalarType.FLOAT + else: + scalar_type = _type_utils.JitScalarType(dtype) + if not scalar_type.dtype().is_floating_point: + result = g.op( + "ConstantFill", + sizes, + dtype_i=_type_utils.JitScalarType.FLOAT.onnx_type(), + input_as_shape_i=1, + value_f=const_value, + ) + return g.op("Cast", result, to_i=scalar_type.onnx_type()) + else: + return g.op( + "ConstantFill", + sizes, + dtype_i=scalar_type.onnx_type(), + input_as_shape_i=1, + value_f=const_value, + ) + + +@_onnx_symbolic("aten::empty") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def empty( + g: jit_utils.GraphContext, + sizes, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + return zeros(g, sizes, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::empty_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def empty_like( + g: jit_utils.GraphContext, + input, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + return zeros_like(g, input, dtype, layout, device, pin_memory) + + +@_onnx_symbolic("aten::zeros") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +def zeros(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + # NOTE: no way to set device and layout in ONNX, so we ignore it + return _constant_fill(g, sizes, dtype, 0) + + +@_onnx_symbolic("aten::zeros_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def zeros_like( + g: jit_utils.GraphContext, + input, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + return _constant_fill(g, shape, dtype, 0) + + +@_onnx_symbolic("aten::ones") +@symbolic_helper.parse_args("v", "i", "v", "v", "v") +def ones(g: jit_utils.GraphContext, sizes, dtype, layout, device, pin_memory=False): + return _constant_fill(g, sizes, dtype, 1) + + +@_onnx_symbolic("aten::ones_like") +@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v") +def ones_like( + g: jit_utils.GraphContext, + input, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + return _constant_fill(g, shape, dtype, 1) + + +@_onnx_symbolic("aten::full") +def full( + g: jit_utils.GraphContext, sizes, value, dtype, layout, device, pin_memory=False +): + const_value = symbolic_helper._maybe_get_const(value, "t") + if symbolic_helper._is_value(const_value): + tmp = zeros(g, sizes, dtype, layout, device) + return opset9.add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1))) + else: + dtype = symbolic_helper._get_const(dtype, "i", "dtype") + return _constant_fill(g, sizes, dtype, const_value) + + +@_onnx_symbolic("aten::full_like") +@symbolic_helper.parse_args("v", "f", "i", "v", "v", "v", "v") +def full_like( + g: jit_utils.GraphContext, + input, + fill_value, + dtype, + layout, + device, + pin_memory=False, + memory_format=None, +): + shape = g.op("Shape", input) + return _constant_fill(g, shape, dtype, fill_value) + + +@_onnx_symbolic("aten::repeat") +def repeat(g: jit_utils.GraphContext, self, repeats): + 
if not symbolic_helper._is_value(repeats): + repeats = g.op("Constant", value_t=torch.LongTensor(repeats)) + if symbolic_helper._is_packed_list(repeats): + repeat_size_len = len(symbolic_helper._unpack_list(repeats)) + else: + const_repeats = symbolic_helper._maybe_get_const(repeats, "is") + repeat_size_len = len(const_repeats) + if self.isCompleteTensor(): + sizes = self.type().sizes() + diff_dims = repeat_size_len - len(sizes) + if diff_dims > 0: + self = opset9.view( + g, self, g.op("Constant", value_t=torch.tensor([1] * diff_dims + sizes)) + ) + return g.op("Tile", self, repeats) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..422210c608beac0312184baf57fb0993f2ddf8de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/utils.py @@ -0,0 +1,2122 @@ +"""Functions to export models into the ONNX IR format. + +These models can be loaded with the ONNX library and then +converted to models which run on other deep learning frameworks. +""" +from __future__ import annotations + +import contextlib +import copy +import inspect +import io +import re +import textwrap +import typing +import warnings +from typing import ( + Any, + Callable, + cast, + Collection, + Dict, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, +) + +import torch +import torch._C._onnx as _C_onnx +import torch.jit._trace +import torch.serialization +from torch import _C +from torch.onnx import ( # noqa: F401 + _constants, + _exporter_states, + errors, + symbolic_caffe2, + symbolic_helper, +) +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import ( + _beartype, + diagnostics, + jit_utils, + onnx_proto_utils, + registration, +) + +__all__ = [ + "is_in_onnx_export", + "select_model_mode_for_export", + "disable_apex_o2_state_dict_hook", + "setup_onnx_logging", + "exporter_context", + "export", + "model_signature", + "warn_on_static_input_change", + "unpack_quantized_tensor", + "export_to_pretty_string", + "unconvertible_ops", + "register_custom_op_symbolic", + "unregister_custom_op_symbolic", +] + + +def is_in_onnx_export() -> bool: + """Returns whether it is in the middle of ONNX export.""" + return GLOBALS.in_onnx_export + + +# TODO(justinchuby): Remove dependency to this global variable from constant_fold.cpp +# Skip check due to cannot import IValue from torch._C +_params_dict = {} # type: ignore[var-annotated] + + +@contextlib.contextmanager +@_beartype.beartype +def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode): + r"""A context manager to temporarily set the training mode of ``model`` + to ``mode``, resetting it when we exit the with-block. + + Args: + model: Same type and meaning as ``model`` arg to :func:`export`. + mode: Same type and meaning as ``training`` arg to :func:`export`. + """ + if not isinstance(mode, _C_onnx.TrainingMode): + raise TypeError( + f"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'." 
+ ) + originally_training: bool = False + + if hasattr(model, "training"): + originally_training = model.training + + # ONNX opset 12 has better support for training amenable models, with updated + # versions of the dropout and batch_norm operators + if mode == _C_onnx.TrainingMode.TRAINING or ( + mode == _C_onnx.TrainingMode.PRESERVE and originally_training + ): + GLOBALS.export_training = True + if GLOBALS.export_onnx_opset_version < 12: + warnings.warn( + "You are exporting the model in training mode with onnx opset " + f"version {GLOBALS.export_onnx_opset_version}. " + "Opset versions lower than opset 12 will not be able to export " + "nodes such as Dropout and BatchNorm correctly." + ) + else: + GLOBALS.export_training = False + + GLOBALS.training_mode = mode + if mode == _C_onnx.TrainingMode.TRAINING: + model.train(True) + elif mode == _C_onnx.TrainingMode.EVAL: + model.train(False) + # else mode == _C_onnx.TrainingMode.PRESERVE, do nothing + + try: + yield + finally: + if hasattr(model, "training") and not mode == _C_onnx.TrainingMode.PRESERVE: + model.train(originally_training) + + +@contextlib.contextmanager +@_beartype.beartype +def disable_apex_o2_state_dict_hook( + model: Union[torch.nn.Module, torch.jit.ScriptFunction] +): + # Apex O2 hook state_dict to return fp16 weights as fp32. + # Exporter cannot identify them as same tensors. + # Since this hook is only used by optimizer, it is safe to + # remove this hook while exporting. + if not isinstance(model, torch.jit.ScriptFunction): + model_hooks = {} # type: ignore[var-annotated] + for module in model.modules(): + for key, hook in module._state_dict_hooks.items(): + if type(hook).__name__ == "O2StateDictHook": + if module not in model_hooks: + model_hooks[module] = {} + model_hooks[module][key] = hook + if module in model_hooks: + for key in model_hooks[module]: + module._state_dict_hooks.pop(key) + try: + yield + finally: + # Add the hooks back + for module, m_map in model_hooks.items(): + for key, hook in m_map.items(): + module._state_dict_hooks[key] = hook + else: + try: + yield + finally: + pass + + +@contextlib.contextmanager +@_beartype.beartype +def setup_onnx_logging(verbose: bool): + is_originally_enabled = torch.onnx.is_onnx_log_enabled() + if is_originally_enabled or verbose: + torch.onnx.enable_log() + try: + yield + finally: + if not is_originally_enabled: + torch.onnx.disable_log() + + +@contextlib.contextmanager +@_beartype.beartype +def exporter_context(model, mode: _C_onnx.TrainingMode, verbose: bool): + with select_model_mode_for_export( + model, mode + ) as mode_ctx, disable_apex_o2_state_dict_hook( + model + ) as apex_ctx, setup_onnx_logging( + verbose + ) as log_ctx, diagnostics.create_export_diagnostic_context() as diagnostic_ctx: + yield (mode_ctx, apex_ctx, log_ctx, diagnostic_ctx) + + +@_beartype.beartype +def export( + model: Union[torch.nn.Module, torch.jit.ScriptModule, torch.jit.ScriptFunction], + args: Union[Tuple[Any, ...], torch.Tensor], + f: Union[str, io.BytesIO], + export_params: bool = True, + verbose: bool = False, + training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL, + input_names: Optional[Sequence[str]] = None, + output_names: Optional[Sequence[str]] = None, + operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX, + opset_version: Optional[int] = None, + do_constant_folding: bool = True, + dynamic_axes: Optional[ + Union[Mapping[str, Mapping[int, str]], Mapping[str, Sequence[int]]] + ] = None, + keep_initializers_as_inputs: Optional[bool] = None, + 
custom_opsets: Optional[Mapping[str, int]] = None, + export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]] = False, + autograd_inlining: Optional[bool] = True, +) -> None: + r"""Exports a model into ONNX format. + + If ``model`` is not a :class:`torch.jit.ScriptModule` nor a + :class:`torch.jit.ScriptFunction`, this runs + ``model`` once in order to convert it to a TorchScript graph to be exported + (the equivalent of :func:`torch.jit.trace`). Thus this has the same limited support + for dynamic control flow as :func:`torch.jit.trace`. + + Args: + model (:class:`torch.nn.Module`, :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`): + the model to be exported. + args (tuple or torch.Tensor): + + args can be structured either as: + + 1. ONLY A TUPLE OF ARGUMENTS:: + + args = (x, y, z) + + The tuple should contain model inputs such that ``model(*args)`` is a valid + invocation of the model. Any non-Tensor arguments will be hard-coded into the + exported model; any Tensor arguments will become inputs of the exported model, + in the order they occur in the tuple. + + 2. A TENSOR:: + + args = torch.Tensor([1]) + + This is equivalent to a 1-ary tuple of that Tensor. + + 3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS:: + + args = ( + x, + { + "y": input_y, + "z": input_z + } + ) + + All but the last element of the tuple will be passed as non-keyword arguments, + and named arguments will be set from the last element. If a named argument is + not present in the dictionary, it is assigned the default value, or None if a + default value is not provided. + + .. note:: + If a dictionary is the last element of the args tuple, it will be + interpreted as containing named arguments. In order to pass a dict as the + last non-keyword arg, provide an empty dict as the last element of the args + tuple. For example, instead of:: + + torch.onnx.export( + model, + ( + x, + # WRONG: will be interpreted as named arguments + {y: z} + ), + "test.onnx.pb" + ) + + Write:: + + torch.onnx.export( + model, + ( + x, + {y: z}, + {} + ), + "test.onnx.pb" + ) + + f: a file-like object (such that ``f.fileno()`` returns a file descriptor) + or a string containing a file name. A binary protocol buffer will be written + to this file. + export_params (bool, default True): if True, all parameters will + be exported. Set this to False if you want to export an untrained model. + In this case, the exported model will first take all of its parameters + as arguments, with the ordering as specified by ``model.state_dict().values()`` + verbose (bool, default False): if True, prints a description of the + model being exported to stdout. In addition, the final ONNX graph will include the + field ``doc_string``` from the exported model which mentions the source code locations + for ``model``. If True, ONNX exporter logging will be turned on. + training (enum, default TrainingMode.EVAL): + * ``TrainingMode.EVAL``: export the model in inference mode. + * ``TrainingMode.PRESERVE``: export the model in inference mode if model.training is + False and in training mode if model.training is True. + * ``TrainingMode.TRAINING``: export the model in training mode. Disables optimizations + which might interfere with training. + input_names (list of str, default empty list): names to assign to the + input nodes of the graph, in order. + output_names (list of str, default empty list): names to assign to the + output nodes of the graph, in order. 
+ operator_export_type (enum, default OperatorExportTypes.ONNX): + + * ``OperatorExportTypes.ONNX``: Export all ops as regular ONNX ops + (in the default opset domain). + * ``OperatorExportTypes.ONNX_FALLTHROUGH``: Try to convert all ops + to standard ONNX ops in the default opset domain. If unable to do so + (e.g. because support has not been added to convert a particular torch op to ONNX), + fall back to exporting the op into a custom opset domain without conversion. Applies + to `custom ops `_ + as well as ATen ops. For the exported model to be usable, the runtime must support + these non-standard ops. + * ``OperatorExportTypes.ONNX_ATEN``: All ATen ops (in the TorchScript namespace "aten") + are exported as ATen ops (in opset domain "org.pytorch.aten"). + `ATen `_ is PyTorch's built-in tensor library, so + this instructs the runtime to use PyTorch's implementation of these ops. + + .. warning:: + + Models exported this way are probably runnable only by Caffe2. + + This may be useful if the numeric differences in implementations of operators are + causing large differences in behavior between PyTorch and Caffe2 (which is more + common on untrained models). + + * ``OperatorExportTypes.ONNX_ATEN_FALLBACK``: Try to export each ATen op + (in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so + (e.g. because support has not been added to convert a particular torch op to ONNX), + fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for + context. + For example:: + + graph(%0 : Float): + %3 : int = prim::Constant[value=0]() + # conversion unsupported + %4 : Float = aten::triu(%0, %3) + # conversion supported + %5 : Float = aten::mul(%4, %0) + return (%5) + + Assuming ``aten::triu`` is not supported in ONNX, this will be exported as:: + + graph(%0 : Float): + %1 : Long() = onnx::Constant[value={0}]() + # not converted + %2 : Float = aten::ATen[operator="triu"](%0, %1) + # converted + %3 : Float = onnx::Mul(%2, %0) + return (%3) + + If PyTorch was built with Caffe2 (i.e. with ``BUILD_CAFFE2=1``), then + Caffe2-specific behavior will be enabled, including special support + for ops are produced by the modules described in + `Quantization `_. + + .. warning:: + + Models exported this way are probably runnable only by Caffe2. + + opset_version (int, default 17): The version of the + `default (ai.onnx) opset `_ + to target. Must be >= 7 and <= 17. + do_constant_folding (bool, default True): Apply the constant-folding optimization. + Constant-folding will replace some of the ops that have all constant inputs + with pre-computed constant nodes. + dynamic_axes (dict[string, dict[int, string]] or dict[string, list(int)], default empty dict): + + By default the exported model will have the shapes of all input and output tensors + set to exactly match those given in ``args``. To specify axes of tensors as + dynamic (i.e. known only at run-time), set ``dynamic_axes`` to a dict with schema: + + * KEY (str): an input or output name. Each name must also be provided in ``input_names`` or + ``output_names``. + * VALUE (dict or list): If a dict, keys are axis indices and values are axis names. If a + list, each element is an axis index. + + For example:: + + class SumModule(torch.nn.Module): + def forward(self, x): + return torch.sum(x, dim=1) + + torch.onnx.export( + SumModule(), + (torch.ones(2, 2),), + "onnx.pb", + input_names=["x"], + output_names=["sum"] + ) + + Produces:: + + input { + name: "x" + ... 
+ shape { + dim { + dim_value: 2 # axis 0 + } + dim { + dim_value: 2 # axis 1 + ... + output { + name: "sum" + ... + shape { + dim { + dim_value: 2 # axis 0 + ... + + While:: + + torch.onnx.export( + SumModule(), + (torch.ones(2, 2),), + "onnx.pb", + input_names=["x"], + output_names=["sum"], + dynamic_axes={ + # dict value: manually named axes + "x": {0: "my_custom_axis_name"}, + # list value: automatic names + "sum": [0], + } + ) + + Produces:: + + input { + name: "x" + ... + shape { + dim { + dim_param: "my_custom_axis_name" # axis 0 + } + dim { + dim_value: 2 # axis 1 + ... + output { + name: "sum" + ... + shape { + dim { + dim_param: "sum_dynamic_axes_1" # axis 0 + ... + + keep_initializers_as_inputs (bool, default None): If True, all the + initializers (typically corresponding to parameters) in the + exported graph will also be added as inputs to the graph. If False, + then initializers are not added as inputs to the graph, and only + the non-parameter inputs are added as inputs. + This may allow for better optimizations (e.g. constant folding) by + backends/runtimes. + + If True, `deduplicate_initializers` pass will not be executed. This means + initializers with duplicated values will not be deduplicated and + will be treated as distinct inputs to the graph. This allows different + input initializers to be supplied at the runtime following export. + + If ``opset_version < 9``, initializers MUST be part of graph + inputs and this argument will be ignored and the behavior will be + equivalent to setting this argument to True. + + If None, then the behavior is chosen automatically as follows: + + * If ``operator_export_type=OperatorExportTypes.ONNX``, the behavior is equivalent + to setting this argument to False. + * Else, the behavior is equivalent to setting this argument to True. + + custom_opsets (dict[str, int], default empty dict): A dict with schema: + + * KEY (str): opset domain name + * VALUE (int): opset version + + If a custom opset is referenced by ``model`` but not mentioned in this dictionary, + the opset version is set to 1. Only custom opset domain name and version should be + indicated through this argument. + + export_modules_as_functions (bool or set of type of nn.Module, default False): Flag to enable + exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the + particular types of modules to export as local functions in ONNX. + This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because + ``opset_version`` < 15 implies IR version < 8, which means no local function support. + Module variables will be exported as function attributes. There are two categories of function + attributes. + + 1. Annotated attributes: class variables that have type annotations via + `PEP 526-style `_ + will be exported as attributes. + Annotated attributes are not used inside the subgraph of ONNX local function because + they are not created by PyTorch JIT tracing, but they may be used by consumers + to determine whether or not to replace the function with a particular fused kernel. + + 2. Inferred attributes: variables that are used by operators inside the module. Attribute names + will have prefix "inferred::". This is to differentiate from predefined attributes retrieved from + python module annotations. Inferred attributes are used inside the subgraph of ONNX local function. + + * ``False`` (default): export ``nn.Module`` forward calls as fine grained nodes. 
+ * ``True``: export all ``nn.Module`` forward calls as local function nodes. + * Set of type of nn.Module: export ``nn.Module`` forward calls as local function nodes, + only if the type of the ``nn.Module`` is found in the set. + + autograd_inlining (bool, default True): Flag used to control whether to inline autograd functions. + Refer to https://github.com/pytorch/pytorch/pull/74765 for more details. + + Raises: + :class:`torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph. + :class:`torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it + uses an operator that is not supported by the exporter. + :class:`torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export. + All errors are subclasses of :class:`errors.OnnxExporterError`. + """ + + _export( + model, + args, + f, + export_params, + verbose, + training, + input_names, + output_names, + operator_export_type=operator_export_type, + opset_version=opset_version, + do_constant_folding=do_constant_folding, + dynamic_axes=dynamic_axes, + keep_initializers_as_inputs=keep_initializers_as_inputs, + custom_opsets=custom_opsets, + export_modules_as_functions=export_modules_as_functions, + autograd_inlining=autograd_inlining, + ) + + +@_beartype.beartype +def _is_constant_tensor_list(node): + if node.kind() != "prim::Constant": + return False + output_type = node.output().type() + if output_type.isSubtypeOf(_C.ListType.ofTensors()): + return True + if output_type.isSubtypeOf(_C.ListType(_C.OptionalType.ofTensor())): + return True + + +# ONNX can't handle constants that are lists of tensors, which can +# get generated in constant prop. So we split them back into prim::ListConstructs + + +@_beartype.beartype +def _split_tensor_list_constants(g, block): + for node in block.nodes(): + for subblock in node.blocks(): + _split_tensor_list_constants(g, subblock) + if _is_constant_tensor_list(node): + inputs = [] + for val in node.output().toIValue(): + input = g.insertConstant(val) + input.node().moveBefore(node) + input.node().copyMetadata(node) + inputs.append(input) + + lc = ( + g.create("prim::ListConstruct", inputs) + .insertBefore(node) + .output() + .setType(_C.ListType.ofTensors()) + ) + lc.node().copyMetadata(node) + node.output().replaceAllUsesWith(lc) + + +@_beartype.beartype +def _optimize_graph( + graph: _C.Graph, + operator_export_type: _C_onnx.OperatorExportTypes, + _disable_torch_constant_prop: bool = False, + fixed_batch_size: bool = False, + params_dict=None, + dynamic_axes=None, + input_names=None, + module=None, +): + if params_dict is None: + params_dict = {} + + # Inline everything + _C._jit_pass_inline(graph) + + # Remove fork/wait nodes + _C._jit_pass_inline_fork_wait(graph) + _C._jit_pass_lint(graph) + if GLOBALS.autograd_inlining: + _C._jit_pass_onnx_autograd_function_process(graph) + _C._jit_pass_lower_all_tuples(graph) + + # we now record some ops like ones/zeros + # into a trace where we previously recorded constants. 
+ # use constant prop to maintain our current level of onnx support + # without implementing symbolics for all of them + if _disable_torch_constant_prop is False: + _C._jit_pass_constant_propagation(graph) + + _split_tensor_list_constants(graph, graph) + # run dce to eliminate dead parts of the graph that might have been + # left behind by things like symbolic_override + _C._jit_pass_dce(graph) + _C._jit_pass_lint(graph) + + # CSE should improve perf when Autocast is used with disabled cache + # Autocast is disabled due to a limitation on tracer as described at https://github.com/pytorch/pytorch/issues/84092 + # Must run before _C._jit_pass_erase_number_types to prevent type substitution + if _C._jit_pass_cse(graph): + _C._jit_pass_onnx_lint(graph) + + _C._jit_pass_canonicalize_graph_fuser_ops(graph) + _C._jit_pass_lint(graph) + _C._jit_pass_peephole(graph, True) + _C._jit_pass_fuse_addmm(graph) + _C._jit_pass_lint(graph) + + _C._jit_pass_peephole(graph, True) + _C._jit_pass_lower_all_tuples(graph) + # in _jit_pass_onnx, symbolic functions are called for each node for conversion. + # However, there are nodes that cannot be converted without additional context. + # For example, the number of outputs from split (and whether it is static or dynamic) is unknown + # until the point where it is unpacked by listUnpack node. + # This pass does a preprocess, and prepares the nodes such that enough context can be received + # by the symbolic function. + _C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module) + _C._jit_pass_onnx_preprocess(graph) + + # onnx does not support tuples, so try to remove them + _C._jit_pass_lint(graph) + + # onnx only supports tensors, but 1 / 2 = 0.5 and tensor(1) / tensor(2) = 0 + _C._jit_pass_prepare_division_for_onnx(graph) + + _C._jit_pass_onnx_remove_print(graph) + _C._jit_pass_onnx_preprocess_caffe2(graph) + + symbolic_helper._quantized_ops.clear() + # Unpack quantized weights for conv and linear ops and insert into graph. + _C._jit_pass_onnx_unpack_quantized_weights( + graph, params_dict, symbolic_helper.is_caffe2_aten_fallback() + ) + if symbolic_helper.is_caffe2_aten_fallback(): + # Insert permutes before and after each conv op to ensure correct order. + _C._jit_pass_onnx_quantization_insert_permutes(graph, params_dict) + + # Find consecutive permutes that are no-ops and remove them. + _C._jit_pass_custom_pattern_based_rewrite_graph( + textwrap.dedent( + """\ + graph(%Pi): + %Pq = quantized::nhwc2nchw(%Pi) + %Pr = quantized::nchw2nhwc(%Pq) + return (%Pr)""" + ), + textwrap.dedent( + """\ + graph(%Ri): + return (%Ri)""" + ), + graph, + ) + + # onnx only supports tensors, so we turn all out number types into tensors + _C._jit_pass_erase_number_types(graph) + if GLOBALS.onnx_shape_inference: + input_names = [] if input_names is None else input_names + dynamic_axes = {} if dynamic_axes is None else dynamic_axes + _C._jit_pass_onnx_set_dynamic_input_shape(graph, dynamic_axes, input_names) + _C._jit_pass_onnx_lint(graph) + + graph = _C._jit_pass_onnx(graph, operator_export_type) + _C._jit_pass_onnx_lint(graph) + _C._jit_pass_lint(graph) + + _C._jit_pass_onnx_scalar_type_analysis( + graph, True, GLOBALS.export_onnx_opset_version + ) + _C._jit_pass_lint(graph) + + _C._jit_pass_onnx_peephole( + graph, GLOBALS.export_onnx_opset_version, fixed_batch_size + ) + _C._jit_pass_lint(graph) + + # graph is not a valid jit graph anymore because types have been replaced + # (e.g. int with Tensor), so it now contains operators that don't actually + # exist. 
We can't run normal dead code elimination because it'd fail trying + # to look up if an operator has side effects, but we can run a dead code + # elimination variant that doesn't need to look up if an op has side effects. + _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph) + _C._jit_pass_lint(graph) + graph = _C._jit_pass_canonicalize(graph) + _C._jit_pass_lint(graph) + if GLOBALS.onnx_shape_inference: + try: + _C._jit_pass_onnx_graph_shape_type_inference( + graph, params_dict, GLOBALS.export_onnx_opset_version + ) + except RuntimeError as exc: + if ( + _C_onnx._CAFFE2_ATEN_FALLBACK + and exc.args[0] + == "ScalarType UNKNOWN_SCALAR is an unexpected tensor scalar type!" + ): + # Caffe2 builds can have UNKNOWN_SCALAR for some tensors + pass + + return graph + + +@_beartype.beartype +def warn_on_static_input_change(input_states): + """Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph. + + We accept dictionaries and strings as ONNX inputs, but they should be only for + configuration use. we detect here if these inputs are modified, and if so we warn + the user that the changes won't take effect in the traced ONNX graph. + """ + for input, traced_input in zip(input_states[0], input_states[1]): + if isinstance(input, dict): + if list(input.keys()) != list(traced_input.keys()): + warning = ( + "We detected that you are modifying a dictionary that is an input to your " + "model. " + "Note that dictionaries are allowed as inputs in ONNX but they should be " + "handled with care. " + "Usages of dictionaries is not recommended, and should not be used except " + "for configuration use. " + "Also note that the order and values of the keys must remain the same. " + ) + warnings.warn(warning) + elif isinstance(input, str): + if input != traced_input: + warning = ( + "The model seems to have string inputs/outputs. " + "Note that strings will not appear as inputs/outputs of the ONNX graph. " + ) + warnings.warn(warning) + + +@_beartype.beartype +def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type): + """Resolves the arguments that are ignored when export_type != operator_export_type.ONNX.""" + if ( + operator_export_type is not operator_export_type.ONNX + and _C_onnx._CAFFE2_ATEN_FALLBACK + ): + if arg_value is True: + warnings.warn( + f"'{arg_name}' can be set to True only when 'operator_export_type' is " + "`ONNX`. Since 'operator_export_type' is not set to 'ONNX', " + f"'{arg_name}' argument will be ignored." + ) + arg_value = False + return arg_value + + +@_beartype.beartype +def _decide_keep_init_as_input( + keep_initializers_as_inputs: Optional[bool], + operator_export_type: _C_onnx.OperatorExportTypes, + opset_version: int, +): + """Decides whether the initializers in the graph should be listed as ONNX graph inputs. + + This method encapsulates the logic to decide whether the initializers in the graph + should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4). + If keep_initializers_as_inputs is not specified (None), then we decide whether to keep + initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type + is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other + export types keep initializers as input (val_keep_init_as_ip=True). + If keep_initializers_as_inputs is specified, then respect it. 
Unless opset version <= 8, + in which case it must be ignored because for opset version <= 8, all initializers MUST be + part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True. + + Special handling is needed for opset version 8 or lower, because irrespective + of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3 + semantics, i.e. all initializers must be listed as ONNX graph input. + """ + + if opset_version < 9: + if keep_initializers_as_inputs is False: + warnings.warn( + "Setting 'keep_initializers_as_inputs=False' for opset version" + "8 or lower would lead to an invalid ONNX graph. Therefore, " + "'keep_initializers_as_inputs=False' is ignored during export." + "Exported model will have initializers as graph inputs (compliant " + " to ONNX IR v3)." + ) + return True # i.e. True == initializers are part of graph input (ONNX IR v3) + val_keep_init_as_ip = ( + True if keep_initializers_as_inputs is None else keep_initializers_as_inputs + ) + if ( + keep_initializers_as_inputs is None + and operator_export_type is _C_onnx.OperatorExportTypes.ONNX + ): + val_keep_init_as_ip = False + return val_keep_init_as_ip + + +@_beartype.beartype +def _decide_add_node_names(add_node_names, operator_export_type): + return _resolve_args_by_export_type( + "add_node_names", add_node_names, operator_export_type + ) + + +@_beartype.beartype +def _decide_constant_folding(do_constant_folding, operator_export_type, training): + do_constant_folding = _resolve_args_by_export_type( + "do_constant_folding", do_constant_folding, operator_export_type + ) + if do_constant_folding and ( + training is not None and training is not _C_onnx.TrainingMode.EVAL + ): + warnings.warn( + "It is recommended that constant folding be turned off ('do_constant_folding=False') " + "when exporting the model in training-amenable mode, i.e. with 'training=TrainingMode.TRAIN' " + "or 'training=TrainingMode.PRESERVE' (when model is in training mode). Otherwise, some " + "learnable model parameters may not translate correctly in the exported ONNX model " + "because constant folding mutates model parameters. Please consider " + "turning off constant folding or setting the training=TrainingMode.EVAL." 
+ ) + return do_constant_folding + + +@_beartype.beartype +def _signature(model) -> inspect.Signature: + should_be_callable = getattr(model, "forward", model) + if callable(should_be_callable): + return inspect.signature(should_be_callable) + raise ValueError("model has no forward method and is not callable") + + +@_beartype.beartype +def _decide_input_format(model, args): + try: + sig = _signature(model) + except ValueError as e: + warnings.warn(f"{e}, skipping _decide_input_format") + return args + try: + ordered_list_keys = list(sig.parameters.keys()) + if ordered_list_keys[0] == "self": + ordered_list_keys = ordered_list_keys[1:] + args_dict: Dict = {} + if isinstance(args, list): + args_list = args + elif isinstance(args, tuple): + args_list = list(args) + else: + args_list = [args] + if isinstance(args_list[-1], dict): + args_dict = args_list[-1] + args_list = args_list[:-1] + n_nonkeyword = len(args_list) + for optional_arg in ordered_list_keys[n_nonkeyword:]: + if optional_arg in args_dict: + args_list.append(args_dict[optional_arg]) + # Check if this arg has a default value + else: + param = sig.parameters[optional_arg] + if param.default != param.empty: + args_list.append(param.default) + args = args_list if isinstance(args, list) else tuple(args_list) + # Cases of models with no input args + except IndexError: + warnings.warn("No input args, skipping _decide_input_format") + except Exception as e: + warnings.warn(f"Skipping _decide_input_format\n {e.args[0]}") + + return args + + +@_beartype.beartype +def _trace(func, args, operator_export_type, return_outs=False): + # Special case for common case of passing a single Tensor + if isinstance(args, torch.Tensor): + args = (args,) + + trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph( + func, + args, + strict=False, + _force_outplace=False, + _return_inputs_states=True, + ) + warn_on_static_input_change(inputs_states) + + trace_graph = _optimize_graph(trace_graph, operator_export_type, params_dict={}) + if return_outs: + return trace_graph, torch_out + return trace_graph + + +@_beartype.beartype +def _trace_and_get_graph_from_model(model, args): + # A basic sanity check: make sure the state_dict keys are the same + # before and after running the model. Fail fast! + orig_state_dict_keys = torch.jit._unique_state_dict(model).keys() + + # Disable Autocast cache because it replaces kernel's weight and bias + # by (undesired) constants. + # No perf impact for when there are reused weights since https://github.com/pytorch/pytorch/pull/85665 + # TODO: https://github.com/pytorch/pytorch/issues/84092 + prev_autocast_cache_enabled = torch.is_autocast_cache_enabled() + torch.set_autocast_cache_enabled(False) + trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph( + model, + args, + strict=False, + _force_outplace=False, + _return_inputs_states=True, + ) + torch.set_autocast_cache_enabled(prev_autocast_cache_enabled) + + warn_on_static_input_change(inputs_states) + + if orig_state_dict_keys != torch.jit._unique_state_dict(model).keys(): + raise RuntimeError( + "state_dict changed after running the tracer; " + "something weird is happening in your model!" 
+ ) + + return trace_graph, torch_out + + +@_beartype.beartype +def _get_param_count_list(method_graph, args_params): + param_count_list = [] + for input_, arg_params_ in zip(method_graph.inputs(), args_params): + if "PackedParams" in str(input_.type()): + in_vars, _ = torch.jit._flatten(arg_params_) + param_count_list.append(len(in_vars)) + else: + param_count_list.append(arg_params_ is not None) + + return param_count_list + + +@_beartype.beartype +def _check_flatten_did_not_remove(original, jit_flattened): + """torch.jit._flatten removes None. Check if it did so in this case.""" + + @_beartype.beartype + def flatten(x): + if isinstance(x, (list, tuple)): + for inner in x: + yield from flatten(inner) + elif isinstance(x, dict): + for inner in x.values(): + yield from flatten(inner) + else: + yield x + + flattened_with_none = list(flatten(original)) + num_none = len(flattened_with_none) - len(jit_flattened) + assert num_none >= 0 + if num_none: + raise ValueError( + f"args contained {num_none} None's after flattening. " + "When exporting a ScriptModule or ScriptFunction, no args may " + "be None because that breaks type propagation." + ) + + +def _create_jit_graph( + model: Union[torch.nn.Module, torch.jit.ScriptFunction], args: Sequence[Any] +) -> Tuple[_C.Graph, List[_C.IValue], Optional[Any], Optional[_C.ScriptModule]]: + if isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)): + flattened_args = tuple(torch.jit._flatten(tuple(args))[0]) + _check_flatten_did_not_remove(args, flattened_args) + torch_out = None + + if isinstance(model, torch.jit.ScriptModule): + try: + graph = model.forward.graph # type: ignore[attr-defined] + except AttributeError as e: + raise RuntimeError("'forward' method must be a script method") from e + _C._jit_pass_onnx_function_substitution(graph) + freezed_module = _C._freeze_module( + cast(_C.ScriptModule, model._c), preserveParameters=True + ) + module, params = _C._jit_onnx_list_model_parameters(freezed_module) + method_graph = module._get_method("forward").graph + args_params = tuple(args) + tuple(params) + param_count_list = _get_param_count_list(method_graph, args_params) + in_vars, _ = torch.jit._flatten(args_params) + graph = _C._propagate_and_assign_input_shapes( + method_graph, tuple(in_vars), param_count_list, False, False + ) + return graph, params, torch_out, module + + # torch.jit.ScriptFunction + params = [] + graph = model.graph + _C._jit_pass_onnx_function_substitution(graph) + param_count_list = _get_param_count_list(graph, args) + graph = _C._propagate_and_assign_input_shapes( + graph, flattened_args, param_count_list, False, False + ) + return graph, params, torch_out, None + + graph, torch_out = _trace_and_get_graph_from_model(model, args) + _C._jit_pass_onnx_lint(graph) + state_dict = torch.jit._unique_state_dict(model) + params = list(state_dict.values()) + graph_inputs = list(graph.inputs()) + user_input_num = len(graph_inputs) - len(state_dict) + param_names = list(state_dict.keys()) + for i, inp in enumerate(graph_inputs): + if i >= user_input_num: + inp.setDebugName(param_names[i - user_input_num]) + _C._jit_pass_onnx_function_substitution(graph) + return graph, params, torch_out, None + + +@_beartype.beartype +def _get_named_param_dict(graph, params): + input_and_param_names = [val.debugName() for val in graph.inputs()] + param_names = input_and_param_names[len(input_and_param_names) - len(params) :] + _params_dict = dict(zip(param_names, params)) + return _params_dict + + +@_beartype.beartype +def 
_get_example_outputs(model, args): + input_args = copy.deepcopy(args) + input_kwargs = {} + if input_args and isinstance(input_args[-1], dict): + input_kwargs = input_args[-1] + input_args = input_args[:-1] + + example_outputs = model(*input_args, **input_kwargs) + if isinstance(example_outputs, list): + example_outputs = [example_outputs] + elif not isinstance(example_outputs, tuple): + example_outputs = (example_outputs,) + + return example_outputs + + +_qtype_vtype_map = { + torch.quint8: torch.uint8, + torch.qint8: torch.int8, + torch.qint32: torch.int32, + torch.quint4x2: torch.int8, +} + + +@_beartype.beartype +def unpack_quantized_tensor(value, cast_onnx_accepted=True): + if isinstance(value, torch.Tensor) and value.dtype in _qtype_vtype_map: + q_value_dequantize = value.dequantize() + q_scale = ( + torch.tensor(value.q_scale(), dtype=torch.double) + if cast_onnx_accepted + else torch.tensor(value.q_scale(), dtype=torch.float32) + ) + q_zero_point = ( + torch.tensor(value.q_zero_point(), dtype=torch.int64) + if cast_onnx_accepted + else torch.tensor(value.q_zero_point(), dtype=_qtype_vtype_map[value.dtype]) + ) + q_value = q_value_dequantize / q_scale + q_zero_point + q_value = q_value.to(dtype=_qtype_vtype_map[value.dtype]) + return q_value, q_scale, q_zero_point + else: + return (value,) + + +@_beartype.beartype +def _pre_trace_quant_model(model, args): + r"""Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return + original model. + + This is due to https://github.com/pytorch/pytorch/issues/75761. + """ + if any( + hasattr(m, "_packed_params") for m in getattr(model, "modules", list)() + ) or any(getattr(arg, "is_quantized", False) for arg in args): + return torch.jit.trace(model, args) + return model + + +@_beartype.beartype +def _model_to_graph( + model, + args, + verbose=False, + input_names=None, + output_names=None, + operator_export_type=_C_onnx.OperatorExportTypes.ONNX, + do_constant_folding=True, + _disable_torch_constant_prop=False, + fixed_batch_size=False, + training=_C_onnx.TrainingMode.EVAL, + dynamic_axes=None, +) -> Tuple[ + _C.Graph, + Dict[str, torch.Tensor], + Optional[ + Union[ + torch.Tensor, + Tuple[torch.Tensor, ...], + List[torch.Tensor], + Dict[str, torch.Tensor], + Any, # Can be nested tuples etc. + ] + ], +]: + """Converts model into an ONNX graph. + + Returns: + graph: A TorchScript IR Graph with ONNX nodes. + params_dict: Dict from input param name to param value. + torch_out: The output tensors resulting from the trace of ``model``. + If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`, + this will be None, since we are not doing any tracing. + """ + # TODO: can we simplify this to always return a tuple of Tensor or None? 
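    # Rough flow of this function:
    #   1. wrap single-value args into a tuple and pre-trace quantized models,
    #   2. build a TorchScript graph via _create_jit_graph and lower it to ONNX
    #      ops with _optimize_graph,
    #   3. assign output shapes/types (from example outputs for scripted models,
    #      from the traced outputs otherwise),
    #   4. optionally constant-fold and run ONNX shape/type inference,
    #   5. rename graph values to friendlier debug names before returning.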
+ + # Special case for common case of passing a single Tensor + if isinstance(args, (torch.Tensor, int, float, bool)): + args = (args,) + + model = _pre_trace_quant_model(model, args) + graph, params, torch_out, module = _create_jit_graph(model, args) + params_dict = _get_named_param_dict(graph, params) + + try: + graph = _optimize_graph( + graph, + operator_export_type, + _disable_torch_constant_prop=_disable_torch_constant_prop, + fixed_batch_size=fixed_batch_size, + params_dict=params_dict, + dynamic_axes=dynamic_axes, + input_names=input_names, + module=module, + ) + except Exception as e: + torch.onnx.log("Torch IR graph at exception: ", graph) + raise + + is_script = isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)) + if is_script: + example_outputs = _get_example_outputs(model, args) + example_outputs_final = () + for example_output in example_outputs: + example_outputs_final += unpack_quantized_tensor(example_output) + out_vars, desc = torch.jit._flatten(example_outputs_final) + _C._jit_pass_onnx_assign_output_shape( + graph, + out_vars, + desc, + GLOBALS.onnx_shape_inference, + is_script, + GLOBALS.export_onnx_opset_version, + ) + + # NB: ONNX requires complete information about output types, which might be + # erased by some optimizations, so we need to set it explicitly again. + else: + if not isinstance(torch_out, (list, tuple)): + output_wrapped = [torch_out] + else: + output_wrapped = torch_out # type: ignore[assignment] + + output_tensors, out_desc = torch.jit._flatten(tuple(output_wrapped)) + # assign_output_shape pass is not compatible with quantized outputs. + # Quantized outputs are flattened to 3 values in ONNX, while packed as + # single value in PyTorch. + if not any(getattr(out, "is_quantized", False) for out in output_tensors): + _C._jit_pass_onnx_assign_output_shape( + graph, + output_tensors, + out_desc, + GLOBALS.onnx_shape_inference, + is_script, + GLOBALS.export_onnx_opset_version, + ) + + _set_input_and_output_names(graph, input_names, output_names) + params_dict = _get_named_param_dict(graph, params) + + if ( + do_constant_folding + and GLOBALS.export_onnx_opset_version + >= _constants.ONNX_CONSTANT_FOLDING_MIN_OPSET + ): + if training is None or training == _C_onnx.TrainingMode.EVAL: + params_dict = _C._jit_pass_onnx_eval_peephole(graph, params_dict) + + params_dict = _C._jit_pass_onnx_constant_fold( + graph, params_dict, GLOBALS.export_onnx_opset_version + ) + _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph) + + if GLOBALS.onnx_shape_inference: + try: + _C._jit_pass_onnx_graph_shape_type_inference( + graph, params_dict, GLOBALS.export_onnx_opset_version + ) + except RuntimeError as exc: + if ( + _C_onnx._CAFFE2_ATEN_FALLBACK + and exc.args[0] + == "ScalarType UNKNOWN_SCALAR is an unexpected tensor scalar type!" + ): + # Caffe2 builds can have UNKNOWN_SCALAR for some tensors + pass + + params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict) + + # For ONNX opset < 9, constants only have three data types: float16, float, double. + # In this pass transform constants of other data types to float/double + cast operator. 
+ if GLOBALS.export_onnx_opset_version < 9: + _C._jit_pass_onnx_cast_all_constant_to_floating(graph) + + params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict) + _C._jit_decay_packed_param_input_types(graph) + + # If output names lack a proper name and are identified only by their unique + # give them a legible name for debugging purposes + _apply_friendly_debug_names(graph, params_dict) + + return graph, params_dict, torch_out + + +@_beartype.beartype +@torch._disable_dynamo +def export_to_pretty_string( + model, + args, + export_params=True, + verbose=False, + training=_C_onnx.TrainingMode.EVAL, + input_names=None, + output_names=None, + operator_export_type=_C_onnx.OperatorExportTypes.ONNX, + export_type=None, + google_printer=False, + opset_version=None, + keep_initializers_as_inputs=None, + custom_opsets=None, + add_node_names=True, + do_constant_folding=True, + dynamic_axes=None, +): + r""" + Similar to :func:`export`, but returns a text representation of the ONNX + model. Only differences in args listed below. All other args are the same + as :func:`export`. + + Args: + add_node_names (bool, default True): Whether or not to set + NodeProto.name. This makes no difference unless + ``google_printer=True``. + google_printer (bool, default False): If False, will return a custom, + compact representation of the model. If True will return the + protobuf's `Message::DebugString()`, which is more verbose. + + Returns: + A UTF-8 str containing a human-readable representation of the ONNX model. + """ + if opset_version is None: + opset_version = _constants.ONNX_DEFAULT_OPSET + if custom_opsets is None: + custom_opsets = {} + GLOBALS.export_onnx_opset_version = opset_version + GLOBALS.operator_export_type = operator_export_type + + with exporter_context(model, training, verbose): + val_keep_init_as_ip = _decide_keep_init_as_input( + keep_initializers_as_inputs, operator_export_type, opset_version + ) + val_add_node_names = _decide_add_node_names( + add_node_names, operator_export_type + ) + val_do_constant_folding = _decide_constant_folding( + do_constant_folding, operator_export_type, training + ) + args = _decide_input_format(model, args) + graph, params_dict, torch_out = _model_to_graph( + model, + args, + verbose, + input_names, + output_names, + operator_export_type, + val_do_constant_folding, + training=training, + dynamic_axes=dynamic_axes, + ) + + return graph._pretty_print_onnx( # type: ignore[attr-defined] + params_dict, + opset_version, + False, + operator_export_type, + google_printer, + val_keep_init_as_ip, + custom_opsets, + val_add_node_names, + ) + + +@_beartype.beartype +def unconvertible_ops( + model, + args, + training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL, + opset_version: Optional[int] = None, +) -> Tuple[_C.Graph, List[str]]: + """Returns an approximated list of all ops that are yet supported by :mod:`torch.onnx`. + + The list is approximated because some ops may be removed during the conversion + process and don't need to be converted. Some other ops may have partial support + that will fail conversion with particular inputs. Please open a Github Issue + for op support requests. + + Args: + model: Same as the `model` parameter in :func:`torch.onnx.export`. + args: Same as the `args` parameter in :func:`torch.onnx.export`. + training: Same as the `training` parameter in :func:`torch.onnx.export`. + opset_version: Same as the `opset_version` parameter in :func:`torch.onnx.export`. 
+ + Returns: + The JIT graph and a list of unconvertible ops in the format of "domain::op". + """ + + opset_version = opset_version or _constants.ONNX_DEFAULT_OPSET + GLOBALS.export_onnx_opset_version = opset_version + + try: + with exporter_context(model, training, verbose=False): + # Create a mostly clean JIT graph that contains the plain aten and + # other ops we can check with the symbolic registry. + # NOTE: We don't want to actually convert any ops to ONNX or run any + # symbolic functions because there is a higher chance that a pass + # fails or an unconvertible op messes up the graph during ONNX conversion. + # This way we can always generate a list just by looking at the names + # of the ops in the graph. + args = _decide_input_format(model, args) + model = _pre_trace_quant_model(model, args) + graph, _, _, module = _create_jit_graph(model, args) + _C._jit_pass_inline(graph) + _C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module) + _C._jit_pass_erase_number_types(graph) + _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph) + except Exception as e: + raise errors.OnnxExporterError( + "Failed to discover unconvertible ops because of errors during the JIT graph " + "generation process." + ) from e + + unsupported_ops = [] + for node in graph.nodes(): + domain_op = node.kind() + if domain_op.startswith(("onnx::", "prim::")): + # We consider onnx and prim ops as supported ops, even though some "prim" + # ops are not implemented as symbolic functions, because they may be + # eliminated in the conversion passes. Users may still see errors caused + # by prim ops even though they don't show up in the list. + continue + if not registration.registry.is_registered_op( + domain_op.rstrip("_"), opset_version + ): + # We consider all registered ops supported, even though some of them are + # only partially supported, because there is not yet a good way to check + # if an op is fully supported. + # TODO(justinchuby): Create a way to check if an op is fully supported. + unsupported_ops.append(domain_op) + return graph, unsupported_ops + + +@_beartype.beartype +def _setup_trace_module_map( + model: Union[torch.nn.Module, torch.jit.ScriptModule], + export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]], +) -> Set[str]: + def __register_attribute_hook(): + attr_name = "_onnx_attrs" + + def _track_module_attributes_forward_pre_hook(module, input): + setattr(module, attr_name, _get_module_attributes(module)) + + def _track_module_attributes_forward_hook(module, input, output): + tracing_state = _C._get_tracing_state() + if not tracing_state: + return + + graph = tracing_state.graph() + onnx_attrs = {} + if hasattr(module, attr_name): + onnx_attrs = getattr(module, attr_name) + delattr(module, attr_name) + + _C._jit_pass_onnx_track_scope_attributes(graph, onnx_attrs) + + for m in model.modules(): + m.register_forward_hook(_track_module_attributes_forward_hook) + m.register_forward_pre_hook(_track_module_attributes_forward_pre_hook) + + def _unqualified_variable_name(qualified_name: str) -> str: + """ + Parse qualified variable name and return the unqualified version. + + Pure numeric atoms are considered inadequate, so this function will look past them, + and start from the first non-numeric atom. 
+ + Example: + >>> _unqualified_variable_name('__main__.Foo.bar') + 'bar' + >>> _unqualified_variable_name('__main__.Foo.bar.0') + 'bar.0' + """ + name_atoms = qualified_name.split(".") + for i, atom in reversed(list(enumerate(name_atoms))): + if not atom.isnumeric(): + return ".".join(name_atoms[i:]) + return qualified_name + + trace_module_map = { + _m: torch._C._jit_onnx_create_full_scope_name( + torch.typename(type(_m)), _unqualified_variable_name(_n) + ) + for _n, _m in model.named_modules() + } + torch.jit._trace._trace_module_map = trace_module_map + if isinstance(export_modules_as_functions, bool) and export_modules_as_functions: + module_typenames = {torch.typename(type(module)) for module in trace_module_map} + elif isinstance(export_modules_as_functions, set) and export_modules_as_functions: + + def _find_typename(v): + if isinstance(v, type): + return torch.typename(v) + else: + raise RuntimeError( + "Only type of the `nn.Module` should be " + "passed in the set for argument `export_modules_as_functions`. " + "Got `%s`." % (type(v).__name__) + ) + + module_typenames = {_find_typename(v) for v in export_modules_as_functions} + else: + module_typenames = set() + + if module_typenames: + __register_attribute_hook() + + return module_typenames + + +@_beartype.beartype +def _reset_trace_module_map(): + torch.jit._trace._trace_module_map = None + _C._jit_pass_onnx_clear_scope_records() + + +@_beartype.beartype +def _get_module_attributes(module): + annotations = typing.get_type_hints(type(module)) + base_m_annotations = typing.get_type_hints(torch.nn.Module) + [annotations.pop(k, None) for k in base_m_annotations] + # Check whether module attributes can be accessed. Some classes + # define attributes but don't provide access to them in their + # constructor. + # + # For example, torch.nn.Embedding has the `freeze` variable and its + # type specified in the class but the attribute is not created in the + # constructor. In other words, there is no `self.freeze = ` + # in the constructor. + # + # Reference: https://github.com/pytorch/pytorch/blob/92de1d322223fb5584e384971b32c46b93bc2f4b/torch/nn/modules/sparse.py#L120 + attrs = {} + for k in annotations: + try: + attrs[k] = getattr(module, k) + except AttributeError: + torch.onnx.log(f"Skipping module attribute '{k}'") + continue + return attrs + + +@_beartype.beartype +def _export( + model, + args, + f, + export_params=True, + verbose=False, + training=_C_onnx.TrainingMode.EVAL, + input_names=None, + output_names=None, + operator_export_type=_C_onnx.OperatorExportTypes.ONNX, + export_type=None, + opset_version=None, + do_constant_folding=True, + dynamic_axes=None, + keep_initializers_as_inputs=None, + fixed_batch_size=False, + custom_opsets=None, + add_node_names=True, + onnx_shape_inference=True, + export_modules_as_functions=False, + autograd_inlining=True, +): + assert GLOBALS.in_onnx_export is False + + if export_type is None: + export_type = _exporter_states.ExportTypes.PROTOBUF_FILE + + # Discussed deprecation with Nikita Shulga and Sergii Dymchenko from Meta + if _C_onnx._CAFFE2_ATEN_FALLBACK: + warnings.warn( + "Caffe2 ONNX exporter is deprecated in version 2.0 and will be " + "removed in 2.2. Please use PyTorch 2.1 or older for this capability.", + category=FutureWarning, + stacklevel=2, + ) + + if isinstance(model, torch.nn.DataParallel): + raise ValueError( + "torch.nn.DataParallel is not supported by ONNX " + "exporter, please use 'attribute' module to " + "unwrap model from torch.nn.DataParallel. 
Try " + "torch.onnx.export(model.module, ...)" + ) + + GLOBALS.onnx_shape_inference = onnx_shape_inference + + if opset_version is None: + opset_version = _constants.ONNX_DEFAULT_OPSET + + # torch.onnx.export does not support opset versions >=18 + if opset_version > _constants.ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET: + # We do not want to fail because we should still allow users to create + # custom symbolic functions for opset>17 + warnings.warn( + f"Exporting to ONNX opset version {opset_version} is not supported. " + f"by 'torch.onnx.export()'. " + f"The highest opset version supported is {_constants.ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET}. " + f"To use a newer opset version, consider 'torch.onnx.dynamo_export()'. " + f"Note that dynamo_export() is in preview. Please report errors with " + f"dynamo_export() as Github issues to https://github.com/pytorch/pytorch/issues.", + category=errors.OnnxExporterWarning, + ) + + if export_modules_as_functions and opset_version < 15: + raise ValueError( + "`export_modules_as_functions` is not supported for `opset_version` < 15." + "This is because `opset_version` < 15 implies IR version < 8, which means " + "no local function support. " + ) + if not operator_export_type: + if _C_onnx._CAFFE2_ATEN_FALLBACK: + operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + else: + operator_export_type = _C_onnx.OperatorExportTypes.ONNX + + # By default, training=TrainingMode.EVAL, + # which is good because running a model in training mode could result in + # internal buffers getting updated, dropout getting applied, etc. + # If you really know what you're doing, you can turn + # training=TrainingMode.TRAINING or training=TrainingMode.PRESERVE, + # (to preserve whatever the original training mode was.) + GLOBALS.export_onnx_opset_version = opset_version + GLOBALS.operator_export_type = operator_export_type + + try: + GLOBALS.in_onnx_export = True + _autograd_inlining_previous = GLOBALS.autograd_inlining + GLOBALS.autograd_inlining = autograd_inlining + + module_typenames_to_export_as_functions: Set[str] = set() + if isinstance(model, (torch.nn.Module, torch.jit.ScriptModule)): + module_typenames_to_export_as_functions = _setup_trace_module_map( + model, export_modules_as_functions + ) + + with exporter_context(model, training, verbose): + val_keep_init_as_ip = _decide_keep_init_as_input( + keep_initializers_as_inputs, + operator_export_type, + opset_version, + ) + val_add_node_names = _decide_add_node_names( + add_node_names, operator_export_type + ) + val_do_constant_folding = _decide_constant_folding( + do_constant_folding, operator_export_type, training + ) + # Normally f can be a file-like object, but for large models, the external data format requires a + # valid `model_file_location`. Code in export.cpp will enforce this. 
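+            # A plain string here is an on-disk path, which also serves as the
+            # `model_file_location` needed by the external data format; in-memory or
+            # file-like objects have no path, so the location is left empty.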
+ if isinstance(f, str): + model_file_location = f + else: + model_file_location = "" + args = _decide_input_format(model, args) + if dynamic_axes is None: + dynamic_axes = {} + _validate_dynamic_axes(dynamic_axes, model, input_names, output_names) + + graph, params_dict, torch_out = _model_to_graph( + model, + args, + verbose, + input_names, + output_names, + operator_export_type, + val_do_constant_folding, + fixed_batch_size=fixed_batch_size, + training=training, + dynamic_axes=dynamic_axes, + ) + + # TODO: Don't allocate a in-memory string for the protobuf + defer_weight_export = ( + export_type is not _exporter_states.ExportTypes.PROTOBUF_FILE + ) + if custom_opsets is None: + custom_opsets = {} + + _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph) + node_attr_to_name = {} # type: ignore[var-annotated] + if module_typenames_to_export_as_functions: + # NOTE: cannot call DCE after this pass. DCE will remove function definition nodes. + node_attr_to_name = _C._jit_pass_onnx_function_extraction( + graph, + module_typenames_to_export_as_functions, + list(params_dict.keys()), + ) + + if keep_initializers_as_inputs is not True: + params_dict = _C._jit_pass_onnx_deduplicate_initializers( # type: ignore[assignment] + graph, params_dict, getattr(model, "training", False) # type: ignore[arg-type] + ) + _C._jit_pass_onnx_assign_scoped_names_for_node_and_value(graph) + if export_params: + ( + proto, + export_map, + val_use_external_data_format, + node_names, + ) = graph._export_onnx( # type: ignore[attr-defined] + params_dict, + opset_version, + dynamic_axes, + defer_weight_export, + operator_export_type, + not verbose, + val_keep_init_as_ip, + custom_opsets, + val_add_node_names, + model_file_location, + node_attr_to_name, + ) + else: + ( + proto, + export_map, + val_use_external_data_format, + node_names, + ) = graph._export_onnx( # type: ignore[attr-defined] + {}, + opset_version, + dynamic_axes, + False, + operator_export_type, + not verbose, + val_keep_init_as_ip, + custom_opsets, + val_add_node_names, + model_file_location, + node_attr_to_name, + ) + # insert function_proto into model_proto. + proto = onnx_proto_utils._add_onnxscript_fn( + proto, + custom_opsets, + ) + if verbose: + torch.onnx.log("Exported graph: ", graph) + onnx_proto_utils._export_file(proto, f, export_type, export_map) + # The ONNX checker only works for ONNX graph. So if the operator_export_type is not ONNX, + # we can skip this check. + # If large model format export is enabled, proto will only contain data location instead of + # raw data and _check_onnx_proto() will fail because it can only handle the raw ONNX proto + # string in memory. 
+ if (operator_export_type is _C_onnx.OperatorExportTypes.ONNX) and ( + not val_use_external_data_format + ): + try: + _C._check_onnx_proto(proto) + except RuntimeError as e: + raise errors.CheckerError(e) from e + finally: + assert GLOBALS.in_onnx_export + GLOBALS.in_onnx_export = False + GLOBALS.autograd_inlining = _autograd_inlining_previous + _reset_trace_module_map() + + return torch_out + + +@_beartype.beartype +def _apply_friendly_debug_names(graph, params): + for n in graph.nodes(): + for v in n.inputs(): + old_name = v.debugName() + if old_name != str(v.unique()): + continue + new_name = f"{n.kind()}_{v.unique()}" + v.setDebugName(new_name) + if old_name in params: + params[new_name] = params.pop(old_name) + + +@_beartype.beartype +def _set_input_and_output_names(graph, input_names, output_names): + @_beartype.beartype + def set_names(node_list, name_list, descriptor): + if name_list is None: + return + if len(name_list) > len(node_list): + raise RuntimeError( + "number of %s names provided (%d) exceeded number of %ss (%d)" + % (descriptor, len(name_list), descriptor, len(node_list)) + ) + + # Mark if the output node DebugName is set before. + output_node_set = set() + for i, (name, node) in enumerate(zip(name_list, node_list)): + # Duplicated output node, insert onnx::Identity to avoid setting the same DebugName after setDebugName(). + if descriptor == "output": + if node in output_node_set: + identity_node = graph.create("onnx::Identity") + identity_node.insertAfter(node.node()) + identity_node.addInput(node) + identity_node.output().setType(node.type()) + graph.return_node().replaceInput(i, identity_node.output()) + node = identity_node.output() + output_node_set.add(node) + + if node.debugName() != name: + node.setDebugName(name) + + set_names(list(graph.inputs()), input_names, "input") + set_names(list(graph.outputs()), output_names, "output") + + +@_beartype.beartype +def _run_symbolic_method(g, op_name, symbolic_fn, args): + r""" + This trampoline function gets invoked for every symbolic method + call from C++. + """ + try: + graph_context = jit_utils.GraphContext( + graph=g, + block=g.block(), + opset=GLOBALS.export_onnx_opset_version, + original_node=None, # type: ignore[arg-type] + params_dict=_params_dict, + env={}, + ) + return symbolic_fn(graph_context, *args) + except TypeError as e: + # Handle the specific case where we didn't successfully dispatch + # to symbolic_fn. Otherwise, the backtrace will have the clues + # you need. 
+ e.args = (f"{e.args[0]} (occurred when translating {op_name})",) + raise + + +@_beartype.beartype +def _add_block(node: _C.Node) -> _C.Block: + return node.addBlock() + + +@_beartype.beartype +def _add_input_to_block(block: _C.Block): + return block.addInputToBlock() # type: ignore[attr-defined] + + +@_beartype.beartype +def _add_output_to_block(block: _C.Block, value: _C.Value) -> int: + return block.registerOutput(value) + + +@_beartype.beartype +def _should_aten_fallback( + name: str, opset_version: int, operator_export_type: _C_onnx.OperatorExportTypes +): + # For BUILD_CAFFE2=0 builds, if domain=="aten" and operator_export_type==ONNX_ATEN, + # an aten::ATen operator is created regardless of symbolics existence + # For BUILD_CAFFE2=1, the same applies only if there is no symbolic available + + is_exportable_aten_op = registration.registry.is_registered_op(name, opset_version) + is_onnx_aten_export = operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN + is_aten_fallback_export = ( + operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + ) + is_caffe2_build = _C_onnx._CAFFE2_ATEN_FALLBACK + + if not name.startswith("aten::"): + return False + + if is_caffe2_build: + if ( + is_onnx_aten_export or is_aten_fallback_export + ) and not is_exportable_aten_op: + return True + else: + if is_onnx_aten_export or ( + is_aten_fallback_export and not is_exportable_aten_op + ): + return True + + return False + + +@_beartype.beartype +def _need_symbolic_context(symbolic_fn: Callable) -> bool: + """Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`.""" + params = tuple(inspect.signature(symbolic_fn).parameters.values()) + # When the annotation is postpone-evaluated, the annotation is a string + # and not a type. We need to use get_type_hints to get the real type. + if not params: + return False + first_param_name = params[0].name + type_hints = typing.get_type_hints(symbolic_fn) + if first_param_name not in type_hints: + return False + param_type = type_hints[first_param_name] + return issubclass(param_type, _exporter_states.SymbolicContext) + + +@_beartype.beartype +def _symbolic_context_handler(symbolic_fn: Callable) -> Callable: + """Decorator that provides the symbolic context to the symbolic function if needed.""" + if _need_symbolic_context(symbolic_fn): + # TODO(justinchuby): Update the module name of GraphContext when it is public + warnings.warn( + "The first argument to symbolic functions is deprecated in 1.13 and will be " + "removed in the future. 
Please annotate treat the first argument (g) as GraphContext " + "and use context information from the object instead.", + category=FutureWarning, + ) + + def wrapper(graph_context: jit_utils.GraphContext, *args, **kwargs): + symbolic_context = _exporter_states.SymbolicContext( + params_dict=graph_context.params_dict, + env=graph_context.env, + cur_node=graph_context.original_node, + onnx_block=graph_context.block, + ) + return symbolic_fn(symbolic_context, graph_context, *args, **kwargs) + + return wrapper + return symbolic_fn + + +@_beartype.beartype +def _get_aten_op_overload_name(n: _C.Node) -> str: + # Returns `overload_name` attribute to ATen ops on non-Caffe2 builds + schema = n.schema() + if not schema.startswith("aten::") or symbolic_helper.is_caffe2_aten_fallback(): + return "" + return _C.parse_schema(schema).overload_name + + +@_beartype.beartype +def _run_symbolic_function( + graph: _C.Graph, + block: _C.Block, + node: _C.Node, + inputs: Any, + env: Dict[_C.Value, _C.Value], + operator_export_type=_C_onnx.OperatorExportTypes.ONNX, +) -> Optional[Union[_C.Value, Sequence[Optional[_C.Value]]]]: + """Runs a symbolic function. + + The function is used in C++ to export the node to ONNX. + + Returns: + A single or a tuple of Values. + None when the node gets cloned as is into the new graph. + """ + + opset_version = GLOBALS.export_onnx_opset_version + + # See Note [Export inplace] + node_kind = node.kind() + if node_kind.endswith("_"): + # Treat relu_ -> relu; add_ -> add etc. + ns_op_name = node_kind[:-1] + else: + ns_op_name = node_kind + + namespace, op_name = jit_utils.parse_node_kind(ns_op_name) + + graph_context = jit_utils.GraphContext( + graph=graph, + block=block, + opset=opset_version, + original_node=node, + params_dict=_params_dict, + env=env, + ) + + # Direct ATen export requested + if _should_aten_fallback(ns_op_name, opset_version, operator_export_type): + attrs = { + k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k) + for k in node.attributeNames() + } + outputs = node.outputsSize() + attrs["outputs"] = outputs + return graph_context.aten_op( + op_name, + *inputs, + overload_name=_get_aten_op_overload_name(node), + **attrs, + ) + + try: + # Caffe2-specific: Quantized op symbolics are registered for opset 9 only. + if symbolic_helper.is_caffe2_aten_fallback() and opset_version == 9: + symbolic_caffe2.register_quantized_ops("caffe2", opset_version) + + if namespace == "quantized" and symbolic_helper.is_caffe2_aten_fallback(): + domain = "caffe2" + else: + domain = namespace + symbolic_function_name = f"{domain}::{op_name}" + + symbolic_function_group = registration.registry.get_function_group( + symbolic_function_name + ) + if symbolic_function_group is not None: + symbolic_fn = symbolic_function_group.get(opset_version) + if symbolic_fn is not None: + # TODO Wrap almost identical attrs assignment or comment the difference. 
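+                # Registered symbolic functions receive attributes under their plain names
+                # (passed as keyword arguments); the fallback paths below instead append a
+                # one-letter kind suffix (e.g. "_i", "_f", "_s") so that graph_context.op /
+                # aten_op can infer each attribute's type.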
+ attrs = { + k: symbolic_helper._node_get(node, k) for k in node.attributeNames() + } + return symbolic_fn(graph_context, *inputs, **attrs) + + attrs = { + k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k) + for k in node.attributeNames() + } + if namespace == "onnx": + # Clone node to trigger ONNX shape inference + return graph_context.op(op_name, *inputs, **attrs, outputs=node.outputsSize()) # type: ignore[attr-defined] + + raise errors.UnsupportedOperatorError( + symbolic_function_name, + opset_version, + symbolic_function_group.get_min_supported() + if symbolic_function_group + else None, + ) + + except RuntimeError: + if operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH: + return None + elif ( + operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + and not symbolic_helper.is_caffe2_aten_fallback() + ): + # Emit ATen op for non-Caffe2 builds when `operator_export_type==ONNX_ATEN_FALLBACK` + attrs = { + k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k) + for k in node.attributeNames() + } + return graph_context.aten_op( + op_name, + *inputs, + overload_name=_get_aten_op_overload_name(node), + **attrs, + ) + raise + except TypeError as e: + # Handle the specific case where we didn't successfully dispatch. + # Otherwise, the backtrace will have the clues you need. + e.args = (f"{e.args[0]} \n(Occurred when translating {op_name}).",) + raise + + +@_beartype.beartype +def _verify_custom_op_name(symbolic_name: str): + if not re.match(r"^[a-zA-Z0-9-_]+::[a-zA-Z-_]+[a-zA-Z0-9-_]*$", symbolic_name): + raise errors.OnnxExporterError( + f"Failed to register operator {symbolic_name}. " + "The symbolic name must match the format domain::name, " + "and should start with a letter and contain only " + "alphanumerical characters" + ) + + ns, _ = jit_utils.parse_node_kind(symbolic_name) + if ns == "onnx": + raise ValueError( + f"Failed to register operator {symbolic_name}. {ns} domain cannot be modified." + ) + + +@_beartype.beartype +def register_custom_op_symbolic( + symbolic_name: str, + symbolic_fn: Callable, + opset_version: int, +): + """Registers a symbolic function for a custom operator. + + When the user registers symbolic for custom/contrib ops, + it is highly recommended to add shape inference for that operator via setType API, + otherwise the exported graph may have incorrect shape inference in some extreme cases. + An example of setType is `test_aten_embedding_2` in `test_operators.py`. + + See "Custom Operators" in the module documentation for an example usage. + + Args: + symbolic_name (str): The name of the custom operator in "::" + format. + symbolic_fn (Callable): A function that takes in the ONNX graph and + the input arguments to the current operator, and returns new + operator nodes to add to the graph. + opset_version (int): The ONNX opset version in which to register. + """ + if symbolic_name.startswith("::"): + symbolic_name = f"aten{symbolic_name}" + + _verify_custom_op_name(symbolic_name) + + registration.custom_onnx_symbolic( + symbolic_name, + opset_version, + decorate=[ + _symbolic_context_handler, + ], + )(symbolic_fn) + + +@_beartype.beartype +def unregister_custom_op_symbolic(symbolic_name: str, opset_version: int): + """Unregisters ``symbolic_name``. + + See "Custom Operators" in the module documentation for an example usage. + + Args: + symbolic_name (str): The name of the custom operator in "::" + format. + opset_version (int): The ONNX opset version in which to unregister. 
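+
+    Example (illustrative; ``"custom_ops::foo"`` is a placeholder and assumes a symbolic
+    function was previously registered for it via :func:`register_custom_op_symbolic`)::
+
+        torch.onnx.unregister_custom_op_symbolic("custom_ops::foo", opset_version=9)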
+    """
+    if symbolic_name.startswith("::"):
+        symbolic_name = f"aten{symbolic_name}"
+
+    _verify_custom_op_name(symbolic_name)
+
+    registration.registry.unregister(symbolic_name, opset_version)
+
+
+@_beartype.beartype
+def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
+    """Ensures the dynamic_axes argument follows the expected format."""
+    if len(dynamic_axes) == 0:
+        return
+
+    if hasattr(model, "graph"):
+        # Extracting set of valid input/output names that shall be used for dynamic_axes
+        if (input_names is None) or len(input_names) == 0:
+            input_names = [x.debugName() for x in model.graph.inputs()]
+        if (output_names is None) or len(output_names) == 0:
+            output_names = [y.debugName() for y in model.graph.outputs()]
+
+    valid_names = set((input_names or []) + (output_names or []))
+
+    # If dynamic axes are provided as a list rather than dictionary, they should
+    # first get converted to a dictionary in expected format. If desired axes names
+    # are not provided for dynamic axes, automatic names shall be generated for
+    # provided dynamic axes of specified input/output
+    for key, value in dynamic_axes.items():
+        if key not in valid_names:
+            warnings.warn(
+                f"Provided key {key} for dynamic axes is not a valid input/output name"
+            )
+        if isinstance(value, list):
+            warnings.warn(
+                "No names were found for specified dynamic axes of provided input. "
+                f"Automatically generated names will be applied to each dynamic axis of input {key}"
+            )
+
+            value_dict = {}
+            for i, x in enumerate(value):
+                if not isinstance(x, int):
+                    raise ValueError(
+                        "The type of axis index is expected to be an integer"
+                    )
+                if x in value_dict:
+                    warnings.warn(
+                        f"Duplicate dynamic axis index {x} was provided for input {key}."
+                    )
+                else:
+                    value_dict[x] = str(key) + "_dynamic_axes_" + str(i + 1)
+            dynamic_axes[key] = value_dict
+
+
+def model_signature(model: Union[torch.nn.Module, Callable]) -> inspect.Signature:
+    return inspect.signature(
+        model.forward if isinstance(model, torch.nn.Module) else model
+    )
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/onnx/verification.py b/env-llmeval/lib/python3.10/site-packages/torch/onnx/verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..b60dfe8e1c90742ae03f60406382e00b39818712
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/onnx/verification.py
@@ -0,0 +1,1884 @@
+"""Functions to verify that an exported ONNX model is functionally equivalent to the original PyTorch model.
+
+ONNX Runtime is required, and is used as the ONNX backend for export verification.
+""" + +from __future__ import annotations + +import contextlib +import copy +import dataclasses +import datetime +import difflib +import enum +import functools +import io +import itertools +import os +import tempfile +import warnings +from typing import ( + Any, + Callable, + Collection, + Dict, + FrozenSet, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import numpy as np + +import torch +import torch._C._onnx as _C_onnx +from torch import _C +from torch.onnx import _constants, _experimental, _exporter_states, utils +from torch.onnx._globals import GLOBALS +from torch.onnx._internal import _beartype, onnx_proto_utils +from torch.types import Number + +_ORT_PROVIDERS = ("CPUExecutionProvider",) + +_NumericType = Union[Number, torch.Tensor, np.ndarray] +_ModelType = Union[torch.nn.Module, torch.jit.ScriptModule] +_InputArgsType = Union[torch.Tensor, Tuple[Any, ...]] +_InputKwargsType = Mapping[str, Any] +_OutputsType = Union[Sequence[_NumericType], Sequence] + + +class OnnxBackend(enum.Enum): + """Enum class for ONNX backend used for export verification.""" + + REFERENCE = "ONNXReferenceEvaluator" + ONNX_RUNTIME_CPU = "CPUExecutionProvider" + ONNX_RUNTIME_CUDA = "CUDAExecutionProvider" + + +@dataclasses.dataclass +class VerificationOptions: + """Options for ONNX export verification. + + Attributes: + flatten: If True, unpack nested list/tuple/dict inputs into a flattened list of + Tensors for ONNX. Set this to False if nested structures are to be preserved + for ONNX, which is usually the case with exporting ScriptModules. Default True. + ignore_none: Whether to ignore None type in torch output, which is usually the + case with tracing. Set this to False, if torch output should keep None type, + which is usually the case with exporting ScriptModules. Default to True. + check_shape: Whether to check the shapes between PyTorch and ONNX Runtime outputs + are exactly the same. Set this to False to allow output shape broadcasting. + Default to True. + check_dtype: Whether to check the dtypes between PyTorch and ONNX Runtime outputs + are consistent. Default to True. + backend: ONNX backend for verification. Default to OnnxBackend.ONNX_RUNTIME_CPU. + rtol: relative tolerance in comparison between ONNX and PyTorch outputs. + atol: absolute tolerance in comparison between ONNX and PyTorch outputs. + remained_onnx_input_idx: If provided, only the specified inputs will be passed + to the ONNX model. Supply a list when there are unused inputs in the model. + Since unused inputs will be removed in the exported ONNX model, supplying + all inputs will cause an error on unexpected inputs. This parameter tells + the verifier which inputs to pass into the ONNX model. + acceptable_error_percentage: acceptable percentage of element mismatches in comparison. + It should be a float of value between 0.0 and 1.0. 
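+
+    Example (illustrative; the field values are arbitrary)::
+
+        options = VerificationOptions(
+            rtol=1e-4,
+            atol=1e-6,
+            backend=OnnxBackend.ONNX_RUNTIME_CPU,
+        )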
+ """ + + flatten: bool = True + ignore_none: bool = True + check_shape: bool = True + check_dtype: bool = True + backend: OnnxBackend = OnnxBackend.ONNX_RUNTIME_CPU + rtol: float = 1e-3 + atol: float = 1e-7 + remained_onnx_input_idx: Optional[Sequence[int]] = None + acceptable_error_percentage: Optional[float] = None + + +@_beartype.beartype +def _flatten_tuples(elem): + flattened = [] + for t in elem: + if isinstance(t, tuple): + flattened.extend(_flatten_tuples(t)) + else: + flattened.append(t) + return flattened + + +# TODO(justinchuby): Add type checking by narrowing down the return type when input is None +def _to_numpy(elem) -> Union[list, np.ndarray]: + if isinstance(elem, torch.Tensor): + if elem.requires_grad: + return elem.detach().cpu().numpy() + else: + return elem.cpu().numpy() + elif isinstance(elem, (list, tuple)): + return [_to_numpy(inp) for inp in elem] + elif isinstance(elem, (bool, int, float)): + return np.array(elem) + elif isinstance(elem, dict): + flattened = [] + for k in elem: + flattened.extend([_to_numpy(k), _to_numpy(elem[k])]) + return flattened + return elem + + +@_beartype.beartype +def _inline_flatten_list(inputs, res_list) -> list: + for i in inputs: + res_list.append(i) if not isinstance( + i, (list, tuple) + ) else _inline_flatten_list(i, res_list) + return res_list + + +@_beartype.beartype +def _unpack_to_numpy(values, cast_onnx_accepted=True) -> list: + value_unpacked = [] + for value in values: + value_unpacked.extend( + utils.unpack_quantized_tensor(value, cast_onnx_accepted=cast_onnx_accepted) + ) + return [_to_numpy(v) for v in value_unpacked] + + +@_beartype.beartype +def _run_onnx(onnx_session, inputs) -> _OutputsType: + kw_inputs = {} + if inputs and isinstance(inputs[-1], dict): + kw_inputs = inputs[-1] + inputs = inputs[:-1] + inputs = _unpack_to_numpy(_flatten_tuples(inputs)) + ort_inputs = {} + for input_name, input in kw_inputs.items(): + ort_inputs[input_name] = _to_numpy(input) + inputs = _to_numpy(inputs) + if hasattr(onnx_session, "get_inputs"): + # onnxruntime.InferenceSession + input_names = [i.name for i in onnx_session.get_inputs()] + elif hasattr(onnx_session, "input_names"): + # onnx.reference.ReferenceEvaluator + input_names = onnx_session.input_names + else: + raise ValueError(f"Unknown ONNX backend type: {type(onnx_session)}.") + + for i, input in enumerate(inputs): + if i == len(input_names) or input_names[i] in ort_inputs: + raise ValueError( + f"got too many positional inputs. inputs: {inputs}. kw_inputs: {kw_inputs}. " + f"input names: {input_names}." + ) + ort_inputs[input_names[i]] = input + onnx_outs = onnx_session.run(None, ort_inputs) + return onnx_outs + + +@_beartype.beartype +def _ort_session( + model: Union[str, io.BytesIO], ort_providers: Sequence[str] = _ORT_PROVIDERS +): + try: + import onnxruntime # type: ignore[import] + except ImportError as e: + raise ImportError("onnxruntime is required for export verification.") from e + + if ort_providers is None: + ort_providers = _ORT_PROVIDERS + + session_options = onnxruntime.SessionOptions() + # suppress ort warnings. + # 0:Verbose, 1:Info, 2:Warning. 3:Error, 4:Fatal. Default is 2. 
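+    # Level 3 (Error) silences ONNX Runtime's verbose/info/warning messages during verification.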
+ session_options.log_severity_level = 3 + ort_session = onnxruntime.InferenceSession( + model if isinstance(model, str) else model.getvalue(), + session_options, + providers=ort_providers, + ) + return ort_session + + +@_beartype.beartype +def _onnx_reference_evaluator_session(model: Union[str, io.BytesIO]): + try: + import onnx + from onnx import reference as onnx_reference # type: ignore[attr-defined] + except ImportError as exc: + raise ImportError("onnx >= 1.13 is required for reference evaluator.") from exc + + proto = ( + onnx.load(model) # type: ignore[attr-defined] + if isinstance(model, str) + else onnx.load_model_from_string(model.getvalue()) # type: ignore[attr-defined] + ) + onnx_session = onnx_reference.ReferenceEvaluator(proto) + return onnx_session + + +@_beartype.beartype +def _onnx_backend_session(model: Union[str, io.BytesIO], backend: OnnxBackend): + if backend == OnnxBackend.REFERENCE: + onnx_session = _onnx_reference_evaluator_session(model) + elif backend in {OnnxBackend.ONNX_RUNTIME_CPU, OnnxBackend.ONNX_RUNTIME_CUDA}: + onnx_session = _ort_session(model, (backend.value,)) + else: + raise ValueError(f"Unsupported backend: {backend}") + return onnx_session + + +@_beartype.beartype +def _compare_onnx_pytorch_outputs_in_np( + onnx_outs: _OutputsType, + pt_outs: _OutputsType, + options: VerificationOptions, +): + assert len(onnx_outs) == len( + pt_outs + ), f"Number of outputs differ ONNX runtime: ({len(onnx_outs)}) PyTorch: ({len(pt_outs)})" + acceptable_error_percentage = options.acceptable_error_percentage + if acceptable_error_percentage and ( + acceptable_error_percentage > 1.0 or acceptable_error_percentage < 0.0 + ): + raise ValueError( + "If set, acceptable_error_percentage should be between 0.0 and 1.0" + ) + + for ort_out, pt_out in zip(onnx_outs, pt_outs): + try: + # TODO: Remove `check_shape` option once every shape inconsistent issue is addressed. + if not options.check_shape: + # Allow different but broadcastable output shapes. + ort_out, pt_out = np.broadcast_arrays(ort_out, pt_out) + torch.testing.assert_close( + ort_out, + pt_out, + rtol=options.rtol, + atol=options.atol, + check_dtype=options.check_dtype, + equal_nan=True, + ) + except AssertionError as e: + if acceptable_error_percentage: + error_percentage = 1 - np.sum( + np.isclose(ort_out, pt_out, rtol=options.rtol, atol=options.atol) + ) / np.prod(ort_out.shape) + if error_percentage <= acceptable_error_percentage: + warnings.warn( + f"Suppressed AssertionError:\n{e}.\n" + f"Error percentage {error_percentage} " + f"within acceptable range {acceptable_error_percentage}." + ) + continue + if ort_out.dtype == np.uint8 or ort_out.dtype == np.int8: + warnings.warn("ONNX output is quantized") + if pt_out.dtype == np.uint8 or pt_out.dtype == np.int8: + warnings.warn("PyTorch output is quantized") + raise + + +@_beartype.beartype +def _compare_onnx_pytorch_outputs( + onnx_outs: _OutputsType, + pt_outs: Any, + options: VerificationOptions, +): + """ + Compare ONNX and PyTorch outputs. + + Args: + onnx_outs: outputs from ONNX backend. + pt_outs: outputs from PyTorch. + options: options for verification. + + Raises: + AssertionError: if outputs from ONNX model and PyTorch model are not + equal up to specified precision. + ValueError: if arguments provided are invalid. 
+ """ + if options.ignore_none: + # torch.jit._flatten filters None type + pt_outs, _ = torch.jit._flatten(pt_outs) + else: + pt_outs = _inline_flatten_list([pt_outs], []) + pt_outs_np = _unpack_to_numpy(pt_outs, cast_onnx_accepted=False) + onnx_outs = _inline_flatten_list(onnx_outs, []) + _compare_onnx_pytorch_outputs_in_np(onnx_outs, pt_outs_np, options) + + +@_beartype.beartype +def _prepare_input_for_pytorch(args, kwargs): + """Prepare input for PyTorch model execution. + + Any future changes/formatting to the input before dispatching to the PyTorch + model should be made in this function. + + Args: + args: positional arguments for PyTorch model forward method. + kwargs: keyword arguments for PyTorch model forward method. + + Returns: + args: positional arguments for PyTorch model forward method. + kwargs: keyword arguments for PyTorch model forward method. + """ + if isinstance(args, (torch.Tensor, dict)): + args = (args,) + # In-place operators will update input tensor data as well. + # Thus inputs are replicated before every forward call. + args = copy.deepcopy(args) + if kwargs: + kwargs = copy.deepcopy(kwargs) + else: + kwargs = {} + return args, kwargs + + +@_beartype.beartype +def _prepare_input_for_export(args, kwargs): + """Prepare input for ONNX model export. + + Any future changes/formatting to the input before dispatching to the + :func:`torch.onnx.export` api should be made in this function. + + Args: + args: positional arguments for PyTorch model forward method. + kwargs: keyword arguments for PyTorch model forward method. + + Returns: + onnx_inputs: positional arguments for ONNX model export, as `args` in + :func:`torch.onnx.export`. + """ + args, kwargs = _prepare_input_for_pytorch(args, kwargs) + if not kwargs and len(args) > 0 and isinstance(args[-1], dict): + onnx_inputs = args + ({},) + elif kwargs: + onnx_inputs = args + (kwargs,) + else: + onnx_inputs = args + return onnx_inputs + + +@_beartype.beartype +def _prepare_input_for_onnx( + args, kwargs, remained_onnx_input_idx: Optional[Sequence[int]], flatten: bool +): + """Prepare input for ONNX model execution in ONNX backend. + + Any future changes/formatting to the input before dispatching to the ONNX backend + run should be made in this function. + + Args: + args: positional arguments for PyTorch model forward method. + kwargs: keyword arguments for PyTorch model forward method. + remained_onnx_input_idx: indices of inputs to be used for ONNX model execution. + flatten: whether to flatten the input before dispatching to the ONNX model execution. + + Returns: + onnx_inputs: positional arguments for ONNX model execution in ONNX backend. + """ + onnx_inputs = _prepare_input_for_export(args, kwargs) + if flatten: + onnx_inputs, _ = torch.jit._flatten(onnx_inputs) + elif onnx_inputs and onnx_inputs[-1] == {}: + # Handle empty kwargs (normally removed by flatten). + onnx_inputs = onnx_inputs[:-1] + if remained_onnx_input_idx is not None: + return [onnx_inputs[i] for i in remained_onnx_input_idx] + else: + return onnx_inputs + + +@_beartype.beartype +def _try_clone_model(model): + """Used for preserving original model in case forward mutates model states.""" + try: + return copy.deepcopy(model) + except Exception: + warnings.warn( + "Failed to clone model. Model state might be mutated during verification." 
+ ) + return model + + +@_beartype.beartype +def _compare_onnx_pytorch_model( + pt_model: _ModelType, + onnx_model_f: Union[str, io.BytesIO], + input_args: _InputArgsType, + input_kwargs: Optional[_InputKwargsType], + additional_test_inputs: Optional[Sequence[_InputArgsType]], + options: VerificationOptions, +): + """Compare outputs from ONNX model runs with outputs from PyTorch model runs. + + Args: + pt_model: PyTorch model. + onnx_model_f: ONNX model file path or file-like object. + input_args: positional arguments for PyTorch model forward method. + input_kwargs: keyword arguments for PyTorch model forward method. + additional_test_inputs: additional positional arguments for PyTorch model + forward method. + options: options for verification. + + Raises: + AssertionError: if outputs from ONNX model and PyTorch model are not + equal up to specified precision. + """ + onnx_session = _onnx_backend_session(onnx_model_f, options.backend) + + @_beartype.beartype + def compare_onnx_pytorch_model_with_input(input_args, input_kwargs): + pt_args, pt_kwargs = _prepare_input_for_pytorch(input_args, input_kwargs) + # TODO: remove this and treat mutating model separately. See #77679 + pt_model_copy = _try_clone_model(pt_model) + pt_outs = pt_model_copy(*pt_args, **pt_kwargs) + + onnx_inputs = _prepare_input_for_onnx( + input_args, input_kwargs, options.remained_onnx_input_idx, options.flatten + ) + + onnx_outs = _run_onnx(onnx_session, onnx_inputs) + + _compare_onnx_pytorch_outputs( + onnx_outs=onnx_outs, + pt_outs=pt_outs, + options=options, + ) + + compare_onnx_pytorch_model_with_input(input_args, input_kwargs) + + if additional_test_inputs: + for test_input_args in additional_test_inputs: + compare_onnx_pytorch_model_with_input(test_input_args, {}) + + +class _GraphDiff: + """A class to represent the difference between two graphs.""" + + @_beartype.beartype + def __init__(self, graph_a: _C.Graph, graph_b: _C.Graph): + """Construct a _GraphDiff object. + + Args: + graph_a (_C.Graph): First graph to compare. + graph_b (_C.Graph): Second graph to compare. + """ + self.graph_a = graph_a + self.graph_b = graph_b + + @_beartype.beartype + def __str__(self): + """See function :func:`diff_report`.""" + return self.diff_report() + + @_beartype.beartype + def _indent(self, lines: str) -> str: + return "\n".join(["\t" + line for line in lines.splitlines()]) + + @_beartype.beartype + def diff_report(self) -> str: + """Return a string representation of the graph difference. + + The report shows the first pair of nodes that diverges. It also shows the source + location of the pair of nodes. + + Returns: + graph_diff_report (str): A string representation of the graph difference. 
+ """ + graph_a = self.graph_a + graph_b = self.graph_b + + graph_a_str = str(graph_a) + graph_b_str = str(graph_b) + + if graph_a_str == graph_b_str: + return "" + + graph_diff = difflib.ndiff( + graph_a_str.splitlines(True), graph_b_str.splitlines(True) + ) + graph_diff_report = ["Graph diff:", self._indent("".join(graph_diff))] + + for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()): + if str(node_a) != str(node_b): + graph_diff_report.append("First diverging operator:") + node_diff = difflib.ndiff( + str(node_a).splitlines(True), str(node_b).splitlines(True) + ) + source_printout = ["node diff:", self._indent("".join(node_diff))] + + stack_a = node_a.sourceRange() if node_a else None + if stack_a: + source_printout.extend( + ["Former source location:", self._indent(str(stack_a))] + ) + stack_b = node_b.sourceRange() if node_b else None + if stack_b: + source_printout.extend( + ["Latter source location:", self._indent(str(stack_b))] + ) + + graph_diff_report.extend(source_printout) + + break + + return "\n".join(graph_diff_report) + + +@_beartype.beartype +def _check_graph_diff( + model: Union[torch.nn.Module, torch.jit.ScriptModule], + test_input_groups: Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]], + export_options: _experimental.ExportOptions, + model_to_graph_func: Callable[ + [ + torch.nn.Module, + Tuple[Any, ...], + Mapping[str, Any], + _experimental.ExportOptions, + ], + _C.Graph, + ], +) -> str: + """Check if graph produced by `model_to_graph_func` is the same across `test_input_groups`. + + Args: + model: See :func:`check_export_model_diff`. + test_input_groups: See :func:`check_export_model_diff`. + export_options: See :func:`check_export_model_diff`. + model_to_graph_func: A function to convert a PyTorch model to a JIT IR graph. + + Returns: + graph_diff_report (str): A string representation of the graph difference. + """ + if len(test_input_groups) < 2: + raise ValueError("Need at least two groups of test inputs to compare.") + + ref_jit_graph = None + for args, kwargs in test_input_groups: + jit_graph = model_to_graph_func(model, args, kwargs, export_options) + if ref_jit_graph is None: + ref_jit_graph = jit_graph + continue + + graph_diff_report = _GraphDiff(ref_jit_graph, jit_graph).diff_report() + if graph_diff_report: + return graph_diff_report + return "" + + +@_beartype.beartype +def _traced_graph_from_model( + model: Union[torch.nn.Module, torch.jit.ScriptModule], + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + export_options: _experimental.ExportOptions, +) -> _C.Graph: + """As part of the ONNX export steps, create a traced JIT graph from a PyTorch model. + + Args: + model: See :func:`check_export_model_diff`. + args: See :func:`check_export_model_diff`. + kwargs: See :func:`check_export_model_diff`. + export_options: See :func:`check_export_model_diff`. + + Returns: + jit_graph (_C.Graph): A traced JIT graph. 
+ """ + training = export_options.training + verbose = export_options.verbose + + with utils.exporter_context(model, training, verbose): + export_inputs = _prepare_input_for_export(args, kwargs) + model = utils._pre_trace_quant_model(model, export_inputs) + jit_graph, _, _, _ = utils._create_jit_graph(model, export_inputs) + return jit_graph + + +@_beartype.beartype +def _onnx_graph_from_model( + model: Union[torch.nn.Module, torch.jit.ScriptModule], + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + export_options: _experimental.ExportOptions, +) -> _C.Graph: + """As part of the ONNX export steps, export an ONNX JIT graph from a PyTorch model. + + Args: + model: See :func:`check_export_model_diff`. + args: See :func:`check_export_model_diff`. + kwargs: See :func:`check_export_model_diff`. + export_options: See :func:`check_export_model_diff`. + + Returns: + onnx_graph (_C.Graph): An ONNX JIT graph. + """ + # TODO: refactor utils.py to remove duplicated code of context setup. See #78834 + opset_version = export_options.opset_version + operator_export_type = export_options.operator_export_type + export_modules_as_functions = export_options.export_modules_as_functions + training = export_options.training + verbose = export_options.verbose + dynamic_axes = export_options.dynamic_axes + input_names = export_options.input_names + output_names = export_options.output_names + + if opset_version is None: + opset_version = _constants.ONNX_DEFAULT_OPSET + + utils._setup_trace_module_map(model, export_modules_as_functions) + + if not operator_export_type: + if _C_onnx._CAFFE2_ATEN_FALLBACK: + operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + else: + operator_export_type = _C_onnx.OperatorExportTypes.ONNX + + GLOBALS.export_onnx_opset_version = opset_version + GLOBALS.operator_export_type = operator_export_type + + with utils.exporter_context(model, training, verbose): + do_constant_folding = utils._decide_constant_folding( + export_options.do_constant_folding, operator_export_type, training + ) + + if dynamic_axes is None: + dynamic_axes = {} + utils._validate_dynamic_axes(dynamic_axes, model, input_names, output_names) + + export_inputs = _prepare_input_for_export(args, kwargs) + export_inputs = utils._decide_input_format(model, export_inputs) + onnx_graph, _, _ = utils._model_to_graph( + model, + export_inputs, + verbose, + input_names, + output_names, + operator_export_type, + do_constant_folding, + training=training, + dynamic_axes=dynamic_axes, + ) + + return onnx_graph + + +@_beartype.beartype +def _onnx_graph_from_aten_graph( + graph: torch.Graph, + export_options: _experimental.ExportOptions, + params_dict: Optional[Dict[str, Any]] = None, +) -> Tuple[torch.Graph, Dict[str, Any]]: + if params_dict is None: + params_dict = {} + operator_export_type = export_options.operator_export_type + dynamic_axes = export_options.dynamic_axes or {} + input_names = export_options.input_names + training = export_options.training + do_constant_folding = export_options.do_constant_folding + opset_version = export_options.opset_version or _constants.ONNX_DEFAULT_OPSET + + GLOBALS.export_onnx_opset_version = opset_version + GLOBALS.operator_export_type = operator_export_type + + do_constant_folding = utils._decide_constant_folding( + do_constant_folding, operator_export_type, training + ) + + # TODO: Below is doing aten graph to onnx. It should be abstracted as a + # function in torch/onnx/utils.py. 
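+    # The passes below mirror the ONNX-lowering half of utils._model_to_graph: optimize the
+    # ATen graph into ONNX ops, run the eval peephole and constant-folding passes, infer
+    # shapes/types, downcast constants for opset < 9, and drop non-tensor parameters.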
+ graph = graph.copy() + graph = utils._optimize_graph( + graph, + operator_export_type, + params_dict=params_dict, + dynamic_axes=dynamic_axes, + input_names=input_names, + ) + + if training is None or training == _C_onnx.TrainingMode.EVAL: + params_dict = torch._C._jit_pass_onnx_eval_peephole(graph, params_dict) + + if ( + do_constant_folding + and opset_version >= _constants.ONNX_CONSTANT_FOLDING_MIN_OPSET + ): + params_dict = _C._jit_pass_onnx_constant_fold(graph, params_dict, opset_version) + _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph) + + if GLOBALS.onnx_shape_inference: + _C._jit_pass_onnx_graph_shape_type_inference(graph, params_dict, opset_version) + + params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict) + + # For ONNX opset < 9, constants only have three data types: float16, float, double. + # In this pass transform constants of other data types to float/double + cast operator. + if opset_version < 9: + _C._jit_pass_onnx_cast_all_constant_to_floating(graph) + + params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict) + _C._jit_decay_packed_param_input_types(graph) + + _C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph) + + if export_options.verbose: + print("ONNX graph: ", graph) + + return graph, params_dict + + +@_beartype.beartype +def _onnx_proto_from_onnx_graph( + onnx_graph: torch.Graph, + export_options: _experimental.ExportOptions, + params_dict: Dict[str, Any], +) -> Tuple[bytes, Mapping[str, bytes]]: + opset_version = export_options.opset_version or _constants.ONNX_DEFAULT_OPSET + dynamic_axes = export_options.dynamic_axes or {} + operator_export_type = export_options.operator_export_type + val_keep_init_as_ip = utils._decide_keep_init_as_input( + export_options.keep_initializers_as_inputs, + operator_export_type, + opset_version, + ) + val_add_node_names = utils._decide_add_node_names(True, operator_export_type) + custom_opsets = export_options.custom_opsets or {} + + proto, export_map, _, _ = onnx_graph._export_onnx( # type: ignore[attr-defined] + params_dict, + opset_version, + dynamic_axes, + False, + operator_export_type, + not export_options.verbose, + val_keep_init_as_ip, + custom_opsets, + val_add_node_names, + "", + {}, + ) + + return proto, export_map + + +@_beartype.beartype +def check_export_model_diff( + model: Union[torch.nn.Module, torch.jit.ScriptModule], + test_input_groups: Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]], + export_options: Optional[_experimental.ExportOptions] = None, +) -> str: + """Verify exported model discrepancy between different groups of inputs. + + A graph is exported for each group of inputs. The exported graphs are then compared + to each other, and discrepancies of first pair of nodes are reported. This function + first checks the jit graph. If no discrepancies were found, it then checks the onnx + graph. + + Unless otherwise specified, the jit/ONNX graph is expected to be the same, regardless + of the inputs used for exporting. A discrepancy implies the graph exported is + not accurate when run on other groups of inputs, which will typically results in + runtime errors or mismatching output. + + Args: + model (torch.nn.Module or torch.jit.ScriptModule): The model to be exported. + test_input_groups (Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]]): A sequence + of input groups to be used to export the model. Each input group is a pair of + (args, kwargs). 
+ export_options (_experimental.ExportOptions, optional): An _experimental.ExportOptions + object that controls the export behavior. + + Returns: + str: A string containing the diff of the exported models. + """ + export_options = ( + _experimental.ExportOptions() if export_options is None else export_options + ) + + jit_diff_report = _check_graph_diff( + model, test_input_groups, export_options, _traced_graph_from_model + ) + if jit_diff_report: + return jit_diff_report + + return _check_graph_diff( + model, test_input_groups, export_options, _onnx_graph_from_model + ) + + +@_beartype.beartype +def verify( + model: _ModelType, + input_args: _InputArgsType, + input_kwargs: Optional[_InputKwargsType] = None, + do_constant_folding: bool = True, + dynamic_axes: Optional[ + Mapping[str, Union[Mapping[int, str], Mapping[str, Sequence[int]]]] + ] = None, + input_names: Optional[Sequence[str]] = None, + output_names: Optional[Sequence[str]] = None, + training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL, + opset_version: Optional[int] = None, + keep_initializers_as_inputs: bool = True, + verbose: bool = False, + fixed_batch_size: bool = False, + use_external_data: bool = False, + additional_test_inputs: Optional[Sequence[_InputArgsType]] = None, + options: Optional[VerificationOptions] = None, +): + """Verify model export to ONNX against original PyTorch model. + + Args: + model (torch.nn.Module or torch.jit.ScriptModule): See :func:`torch.onnx.export`. + input_args (tuple): See :func:`torch.onnx.export`. + input_kwargs (dict): See :func:`torch.onnx.export`. + do_constant_folding (bool, optional): See :func:`torch.onnx.export`. + dynamic_axes (dict, optional): See :func:`torch.onnx.export`. + input_names (list, optional): See :func:`torch.onnx.export`. + output_names (list, optional): See :func:`torch.onnx.export`. + training (torch.onnx.TrainingMode): See :func:`torch.onnx.export`. + opset_version (int, optional): See :func:`torch.onnx.export`. + keep_initializers_as_inputs (bool, optional): See :func:`torch.onnx.export`. + verbose (bool, optional): See :func:`torch.onnx.export`. + fixed_batch_size (bool, optional): Legacy argument, used only by rnn test cases. + use_external_data (bool, optional): Explicitly specify whether to export the + model with external data. + additional_test_inputs (list, optional): List of tuples. Each tuple is a group of + input arguments to test. Currently only *args are supported. + options (_VerificationOptions, optional): A _VerificationOptions object that + controls the verification behavior. + + Raises: + AssertionError: if outputs from ONNX model and PyTorch model are not + equal up to specified precision. + ValueError: if arguments provided are invalid. + """ + if options is None: + options = VerificationOptions() + + if training == torch.onnx.TrainingMode.TRAINING: + model.train() + elif training == torch.onnx.TrainingMode.EVAL: + model.eval() + with torch.no_grad(), contextlib.ExitStack() as stack: + model_f: Union[str, io.BytesIO] = io.BytesIO() + if use_external_data: + tmpdir_path = stack.enter_context(tempfile.TemporaryDirectory()) + model_f = os.path.join(tmpdir_path, "model.onnx") + + inputs_for_export = _prepare_input_for_export(input_args, input_kwargs) + + # TODO(#77679): remove this and treat mutating model separately. 
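+        # Keep an untouched copy of the model: exporting below may mutate `model`'s state,
+        # and the copy is what produces the PyTorch reference outputs in the comparison.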
+ model_copy = _try_clone_model(model) + utils._export( + model, + inputs_for_export, + model_f, + opset_version=opset_version, + do_constant_folding=do_constant_folding, + keep_initializers_as_inputs=keep_initializers_as_inputs, + dynamic_axes=dynamic_axes, + input_names=input_names, + output_names=output_names, + fixed_batch_size=fixed_batch_size, + training=training, + verbose=verbose, + ) + + _compare_onnx_pytorch_model( + pt_model=model_copy, + onnx_model_f=model_f, + input_args=input_args, + input_kwargs=input_kwargs, + additional_test_inputs=additional_test_inputs, + options=options, + ) + + +@_beartype.beartype +def verify_aten_graph( + graph: torch.Graph, + input_args: Tuple[Any, ...], + export_options: _experimental.ExportOptions, + params_dict: Optional[Dict[str, Any]] = None, + verification_options: Optional[VerificationOptions] = None, +) -> Tuple[Optional[AssertionError], torch.Graph, _OutputsType, _OutputsType]: + if verification_options is None: + verification_options = VerificationOptions() + if params_dict is None: + params_dict = {} + + original_jit_graph = graph + graph = graph.copy() + + # Execute aten graph and get reference torch jit outputs. + graph_inputs = list(graph.inputs()) + jit_inputs = tuple([arg for arg in input_args if arg is not None]) + weights = [params_dict[v.debugName()] for v in graph_inputs[len(jit_inputs) :]] + assert all(w is not None for w in weights) + # TODO: Only copy the argument if mutation is detected in Graph. + jit_inputs = copy.deepcopy(jit_inputs) + jit_input_and_parameters = jit_inputs + tuple(weights) + jit_outs = torch._C._jit_interpret_graph(graph, jit_input_and_parameters) # type: ignore[attr-defined] + if not isinstance(jit_outs, (list, tuple)): + jit_outs = [jit_outs] + + # Convert aten graph to onnx graph. + graph, onnx_params_dict = _onnx_graph_from_aten_graph( + graph, export_options, params_dict + ) + + proto, export_map = _onnx_proto_from_onnx_graph( + graph, export_options, onnx_params_dict + ) + model_f: Union[str, io.BytesIO] = io.BytesIO() + export_type = _exporter_states.ExportTypes.PROTOBUF_FILE + onnx_proto_utils._export_file(proto, model_f, export_type, export_map) + + # NOTE: Verification is unstable. Try catch to emit information for debugging. + try: + # NOTE: Input might be dce'ed, so we need to remove those from the input args. 
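+        # Match surviving graph inputs back to the original positional args by debug name,
+        # dropping args whose inputs were removed by dead-code elimination.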
+ new_input_names = {v.debugName() for v in graph.inputs()} + new_input_args = [] + for v, arg in zip(original_jit_graph.inputs(), input_args): + if v.debugName() in new_input_names: + new_input_args.append(arg) + input_args = tuple(new_input_args) + + onnx_inputs = _prepare_input_for_onnx( + input_args, + {}, + verification_options.remained_onnx_input_idx, + verification_options.flatten, + ) + + onnx_session = _onnx_backend_session(model_f, verification_options.backend) + onnx_outs = _run_onnx(onnx_session, onnx_inputs) + del onnx_session # To free device memory + + try: + _compare_onnx_pytorch_outputs( + onnx_outs=onnx_outs, + pt_outs=jit_outs, + options=verification_options, + ) + except AssertionError as e: + return e, graph, jit_outs, onnx_outs + + return None, graph, jit_outs, onnx_outs + + except Exception as e: + print("Unexpected error during verification.") + print("jit graph: ", original_jit_graph) + print("onnx graph: ", graph) + raise e + + +class GraphInfoPrettyPrinter: + graph_info: Optional[GraphInfo] + upper_printer: Optional[GraphInfoPrettyPrinter] + lower_printer: Optional[GraphInfoPrettyPrinter] + + graph_str_lambdas: Mapping[int, str] + connector_str_lambdas: Mapping[int, str] + children_str_lambdas: Mapping[int, str] + + def __init__(self, graph_info: Optional[GraphInfo]): + self.graph_info = graph_info + if ( + graph_info is not None + and graph_info.upper_graph_info is not None + and graph_info.lower_graph_info is not None + ): + self.upper_printer = GraphInfoPrettyPrinter(graph_info.upper_graph_info) + self.lower_printer = GraphInfoPrettyPrinter(graph_info.lower_graph_info) + else: + self.upper_printer = None + self.lower_printer = None + + @_beartype.beartype + def _total_rows(self) -> int: + if self.graph_info is None: + return 1 + if self.upper_printer and self.lower_printer: + return ( + self.upper_printer._total_rows() + self.lower_printer._total_rows() + 1 + ) + return 2 # Two lines: node count + id. + + @_beartype.beartype + def _node_count_segment_str(self) -> str: + if self.graph_info is None: + return "..." 
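+        # Renders segments such as "5 ✓" for a matching partition, "3 X" for a
+        # mismatched one, and "1 X (aten::relu)" once a mismatched partition has
+        # been narrowed down to a single essential node.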
+ node_count = self.graph_info.essential_node_count() + has_mismatch = self.graph_info.has_mismatch() + error_node_kind = ( + f"({self.graph_info.essential_node_kinds().pop()})" + if node_count == 1 and has_mismatch + else "" + ) + + return f"{node_count} {'X' if has_mismatch else '✓'} {error_node_kind}" + + @_beartype.beartype + def _graph_id_segment_str(self) -> str: + if self.graph_info is None: + return "" + return f"id: {self.graph_info.id}" + + @_beartype.beartype + def _max_segment_columns(self) -> int: + return max( + map(len, (self._node_count_segment_str(), self._graph_id_segment_str())) + ) + + @_beartype.beartype + def _graph_segment_str_at_line(self, line: int) -> str: + """Get the string representation of the graph segment at the given line.""" + if line == 0: + result_str = self._node_count_segment_str() + result_str += " " * (self._max_segment_columns() - len(result_str)) + return result_str + if line == 1: + result_str = self._graph_id_segment_str() + result_str += " " * (self._max_segment_columns() - len(result_str)) + return result_str + if 0 <= line < self._total_rows(): + return " " * self._max_segment_columns() + return "" + + @_beartype.beartype + def _connector_segment_str_at_line(self, line: int) -> str: + """Get the connector segment string at the given line.""" + if self.upper_printer is None and self.lower_printer is None: + return "" + upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1 + lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1 + if line == 0: + return " __" + elif line < upper_total_rows + 1: + return " | " + elif line == upper_total_rows + 1: + return " |__" + elif line < upper_total_rows + lower_total_rows + 1: + return " " + return "" + + @_beartype.beartype + def _children_str_at_line(self, line: int) -> str: + """Get the string representation of the children at the given line. + + Recursively calls `_str_at_line` on children nodes. + """ + if self.upper_printer is None and self.lower_printer is None: + return "" + upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1 + lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1 + if 0 <= line < upper_total_rows: + return ( + self.upper_printer._str_at_line(line) if self.upper_printer else "..." + ) + elif upper_total_rows < line < upper_total_rows + lower_total_rows + 1: + return ( + self.lower_printer._str_at_line(line - upper_total_rows - 1) + if self.lower_printer + else "..." + ) + return "" + + @_beartype.beartype + def _str_at_line(self, line: int) -> str: + """Get the string representation of the graph at the given line.""" + return ( + self._graph_segment_str_at_line(line) + + self._connector_segment_str_at_line(line) + + self._children_str_at_line(line) + ) + + def pretty_print(self): + if self.graph_info is None: + print(None) + return + # Print tree. + print(" Tree: ".center(80, "=")) + total_rows = self._total_rows() + for line in range(total_rows): + print(self._str_at_line(line).rstrip()) + if self.graph_info.has_mismatch(): + # Summarize leaf subgraphs with mismatch. + print(" Mismatch leaf subgraphs: ".center(80, "=")) + print( + [ + graph_info.id + for graph_info in self.graph_info.all_mismatch_leaf_graph_info() + ] + ) + # Summarize node kinds with mismatch. 
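+            # Build a histogram: node kind -> number of mismatched leaf partitions
+            # whose essential nodes are all of that single kind, e.g. {"aten::relu": 2}.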
+            mismatch_node_kinds: Dict[str, int] = {}
+            for graph_info in self.graph_info.all_mismatch_leaf_graph_info():
+                node_kinds = graph_info.essential_node_kinds()
+                if len(node_kinds) == 1:
+                    node_kind = node_kinds.pop()
+                    mismatch_node_kinds[node_kind] = (
+                        mismatch_node_kinds.get(node_kind, 0) + 1
+                    )
+            print(" Mismatch node kinds: ".center(80, "="))
+            print(mismatch_node_kinds)
+        else:
+            print(" No mismatch found. ".center(80, "="))
+
+
+class OnnxTestCaseRepro:
+    def __init__(self, repro_dir):
+        self.repro_dir = repro_dir
+        self.proto, self.inputs, self.outputs = onnx_proto_utils.load_test_case(
+            repro_dir
+        )
+
+    @classmethod
+    @_beartype.beartype
+    def create_test_case_repro(
+        cls, proto: bytes, inputs, outputs, dir: str, name: Optional[str] = None
+    ):
+        """Create a repro under "{dir}/test_{name}" for an ONNX test case.
+
+        The test case contains the model and the inputs/outputs data. The directory
+        structure is as follows:
+
+        dir
+        ├── test_<name>
+        │   ├── model.onnx
+        │   └── test_data_set_0
+        │       ├── input_0.pb
+        │       ├── input_1.pb
+        │       ├── output_0.pb
+        │       └── output_1.pb
+
+        Args:
+            proto: ONNX model proto.
+            inputs: Inputs to the model.
+            outputs: Outputs of the model.
+            dir: Directory to save the repro.
+            name: Name of the test case. If not specified, a name based on the current
+                time will be generated.
+        Returns:
+            Path to the repro.
+        """
+        if name is None:
+            name = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
+        return onnx_proto_utils.export_as_test_case(
+            proto,
+            _to_numpy(inputs),
+            _to_numpy(outputs),
+            name,
+            dir,
+        )
+
+    @_beartype.beartype
+    def validate(self, options: VerificationOptions):
+        """Run the ONNX test case with options.backend, and compare with the expected outputs.
+
+        Args:
+            options: Options for validation.
+
+        Raises:
+            AssertionError: if outputs from options.backend and expected outputs are not
+                equal up to specified precision.
+        """
+        onnx_session = _onnx_backend_session(io.BytesIO(self.proto), options.backend)
+        run_outputs = onnx_session.run(None, self.inputs)
+        if hasattr(onnx_session, "get_outputs"):
+            output_names = [o.name for o in onnx_session.get_outputs()]
+        elif hasattr(onnx_session, "output_names"):
+            output_names = onnx_session.output_names
+        else:
+            raise ValueError(f"Unknown onnx session type: {type(onnx_session)}")
+        expected_outs = [self.outputs[name] for name in output_names]
+        _compare_onnx_pytorch_outputs_in_np(run_outputs, expected_outs, options)
+
+
+@dataclasses.dataclass
+class GraphInfo:
+    """GraphInfo contains validation information of a TorchScript graph and its converted ONNX graph."""
+
+    graph: torch.Graph
+    input_args: Tuple[Any, ...]
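+    # Positional inputs for `graph`; parameters/buffers are supplied separately via
+    # `params_dict`, keyed by the debug name of the corresponding graph input.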
+ params_dict: Dict[str, Any] + export_options: _experimental.ExportOptions = dataclasses.field( + default_factory=_experimental.ExportOptions + ) + mismatch_error: Optional[AssertionError] = dataclasses.field( + default=None, init=False + ) + pt_outs: Optional[Sequence[_NumericType]] = dataclasses.field( + default=None, init=False + ) + upper_graph_info: Optional[GraphInfo] = dataclasses.field(default=None, init=False) + lower_graph_info: Optional[GraphInfo] = dataclasses.field(default=None, init=False) + id: str = dataclasses.field(default="") + _onnx_graph: Optional[torch.Graph] = dataclasses.field(init=False, default=None) + + _EXCLUDED_NODE_KINDS: FrozenSet[str] = frozenset( + {"prim::Constant", "prim::ListConstruct", "aten::ScalarImplicit"} + ) + + def clear(self): + """Clear states and results of previous verification.""" + self.mismatch_error = None + self.pt_outs = None + self._onnx_graph = None + self.upper_graph_info = None + self.lower_graph_info = None + + def pretty_print_tree(self): + """Pretty print `GraphInfo` tree. + + Each node represents a subgraph, showing the number of nodes in the subgraph and + a check mark if the subgraph has output mismatch between torch and ONNX. + + The id of the subgraph is shown under the node. The `GraphInfo` object for any + subgraph can be retrieved by calling `graph_info.find_partition(id)`. + + Example:: + + ==================================== Tree: ===================================== + 5 X __2 X __1 ✓ + id: | id: 0 | id: 00 + | | + | |__1 X (aten::relu) + | id: 01 + | + |__3 X __1 ✓ + id: 1 | id: 10 + | + |__2 X __1 X (aten::relu) + id: 11 | id: 110 + | + |__1 ✓ + id: 111 + =========================== Mismatch leaf subgraphs: =========================== + ['01', '110'] + ============================= Mismatch node kinds: ============================= + {'aten::relu': 2} + + """ + GraphInfoPrettyPrinter(self).pretty_print() + + def pretty_print_mismatch(self, graph: bool = False): + """Pretty print details of the mismatch between torch and ONNX. + + Args: + graph: If True, print the ATen JIT graph and ONNX graph. + """ + print(f" Mismatch info for graph partition {self.id}: ".center(80, "=")) + if graph: + print(" ATen JIT graph ".center(80, "=")) + # TODO: A more compact graph printer. + # * Drop stride, grad, device information. + # * Show source location on a separate line. 
+ print(self.graph) + if self._onnx_graph is not None: + print(" ONNX graph ".center(80, "=")) + print(self._onnx_graph) + if self.has_mismatch(): + print(" Mismatch error ".center(80, "=")) + print(self.mismatch_error) + else: + print(" No mismatch ".center(80, "=")) + + @_beartype.beartype + def has_mismatch(self) -> bool: + """Return True if the subgraph has output mismatch between torch and ONNX.""" + return self.mismatch_error is not None + + @_beartype.beartype + def essential_node_count(self) -> int: + """Return the number of nodes in the subgraph excluding those in `_EXCLUDED_NODE_KINDS`.""" + return sum( + 1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS + ) + + @_beartype.beartype + def essential_node_kinds(self) -> Set[str]: + """Return the set of node kinds in the subgraph excluding those in `_EXCLUDED_NODE_KINDS`.""" + return { + n.kind() + for n in self.graph.nodes() + if n.kind() not in self._EXCLUDED_NODE_KINDS + } + + @_beartype.beartype + def all_mismatch_leaf_graph_info(self) -> List["GraphInfo"]: + """Return a list of all leaf `GraphInfo` objects that have mismatch.""" + if not self.has_mismatch(): + return [] + + no_mismatch_children = ( + self.upper_graph_info is None or not self.upper_graph_info.has_mismatch() + ) and ( + self.lower_graph_info is None or not self.lower_graph_info.has_mismatch() + ) + + if no_mismatch_children: + return [self] + + results = [] + if self.upper_graph_info is not None: + results += self.upper_graph_info.all_mismatch_leaf_graph_info() + if self.lower_graph_info is not None: + results += self.lower_graph_info.all_mismatch_leaf_graph_info() + + return results + + @_beartype.beartype + def find_partition(self, id: str) -> Optional["GraphInfo"]: + """Find the `GraphInfo` object with the given id.""" + if id == self.id: + return self + current_length = len(self.id) + if len(id) > current_length: + if id[current_length] == "0" and self.upper_graph_info is not None: + return self.upper_graph_info.find_partition(id) + elif id[current_length] == "1" and self.lower_graph_info is not None: + return self.lower_graph_info.find_partition(id) + return None + + @_beartype.beartype + def export_repro( + self, repro_dir: Optional[str] = None, name: Optional[str] = None + ) -> str: + """Export the subgraph to ONNX along with the input/output data for repro. + + The repro directory will contain the following files:: + + dir + ├── test_ + │ ├── model.onnx + │ └── test_data_set_0 + │ ├── input_0.pb + │ ├── input_1.pb + │ ├── output_0.pb + │ └── output_1.pb + + Args: + repro_dir: The directory to export the repro files to. Defaults to current + working directory if None. + name: An optional name for the test case folder: "test_{name}". + + Returns: + The path to the exported repro directory. + """ + + if repro_dir is None: + repro_dir = os.getcwd() + repro_dir = os.path.join(repro_dir, "onnx_debug") + + onnx_graph, onnx_params_dict = _onnx_graph_from_aten_graph( + self.graph, self.export_options, self.params_dict + ) + + proto, _ = _onnx_proto_from_onnx_graph( + onnx_graph, self.export_options, onnx_params_dict + ) + return OnnxTestCaseRepro.create_test_case_repro( + proto, self.input_args, self.pt_outs, repro_dir, name + ) + + @_beartype.beartype + def _graph_partition_pivot(self) -> int: + """Find the pivot index to partition the graph. + + The pivot is the node that splits the graph into two parts. 
Each part should
+        have a similar number of nodes, excluding the non-essential ops defined in
+        `_EXCLUDED_NODE_KINDS`, such as `prim::Constant`.
+        If the graph has an odd number of nodes, the upper part will have one more node.
+        If the graph does not have any node that can be partitioned, return -1.
+
+        Returns:
+            The index of the pivot node.
+        """
+        included_node_indices = [
+            i
+            for i, n in enumerate(self.graph.nodes())
+            if n.kind() not in self._EXCLUDED_NODE_KINDS
+        ]
+        half_idx = len(included_node_indices) // 2 - 1
+        if half_idx >= 0 and len(included_node_indices) > half_idx:
+            return included_node_indices[half_idx] + 1
+        return -1
+
+    @_beartype.beartype
+    def _partition_upper_graph(self) -> torch.Graph:
+        pivot = self._graph_partition_pivot()
+        if pivot == -1:
+            return torch.Graph()
+        graph = self.graph.copy()  # Copy so as not to mutate the parent graph.
+        original_outputs = list(graph.outputs())
+
+        def _process_bridge_value_for_upper(
+            new_outputs: List[torch.Value], bridge_value: torch.Value
+        ) -> torch.Value:
+            # Add bridge values as upper graph outputs.
+            new_outputs.append(bridge_value)
+            return bridge_value
+
+        new_outputs: List[torch.Value] = []
+        process_bridge_value_for_upper = functools.partial(
+            _process_bridge_value_for_upper, new_outputs
+        )
+        _, dropped_nodes, complete_upper_nodes_set, _ = self._partition_nodes(
+            graph, pivot, process_bridge_value_for_upper
+        )
+
+        for _ in enumerate(original_outputs):
+            graph.eraseOutput(0)
+        for output in new_outputs:
+            graph.registerOutput(output)
+
+        for node in reversed(dropped_nodes):
+            node.destroy()
+
+        for i, input in reversed(list(enumerate(list(graph.inputs())))):
+            if (
+                not _has_uses_by_nodes(input, complete_upper_nodes_set)
+                and input not in new_outputs
+            ):
+                try:
+                    graph.eraseInput(i)
+                except RuntimeError as e:
+                    print(input, graph)
+                    raise e
+
+        return graph
+
+    @_beartype.beartype
+    def _partition_lower_graph(self) -> torch.Graph:
+        pivot = self._graph_partition_pivot()
+        if pivot == -1:
+            return torch.Graph()
+        graph = self.graph.copy()  # Copy so as not to mutate the parent graph.
+        original_outputs = list(graph.outputs())
+        original_inputs = list(graph.inputs())
+
+        new_outputs = []
+
+        def _process_bridge_value_for_lower(
+            graph: torch.Graph, bridge_value: torch.Value
+        ) -> torch.Value:
+            # Add bridge values as lower graph inputs.
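+            # A bridge value is produced by the upper partition and consumed by the
+            # lower one; here it becomes a fresh input of the lower graph, and every
+            # use of the original value is redirected to that new input.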
+ new_input = graph.addInput() + bridge_value.replaceAllUsesWith(new_input) + new_input.copyMetadata(bridge_value) + return new_input + + process_bridge_value_for_lower = functools.partial( + _process_bridge_value_for_lower, graph + ) + + upper_nodes, lower_nodes, _, complete_lower_nodes_set = self._partition_nodes( + graph, pivot, process_bridge_value_for_lower + ) + + for output in original_outputs: + if _produced_by(output, lower_nodes): + new_outputs.append(output) + for _ in enumerate(original_outputs): + graph.eraseOutput(0) + for output in new_outputs: + graph.registerOutput(output) + + for input in original_inputs: + if _has_uses_by_nodes(input, complete_lower_nodes_set): + new_input = graph.addInput() + input.replaceAllUsesWith(new_input) + new_input.copyMetadata(input) + + for node in reversed(upper_nodes): + if node not in complete_lower_nodes_set: + try: + node.destroy() + except RuntimeError as e: + print(node, graph) + raise e + + for _ in original_inputs: + graph.eraseInput(0) + + return graph + + @_beartype.beartype + def _partition_node( + self, + node: torch.Node, + complete_upper_nodes_set: Set[torch.Node], + complete_lower_nodes_set: Set[torch.Node], + original_graph_outputs: Set[torch.Value], + covered_bridge_values: Set[torch.Value], + process_bridge_value: Callable[[torch.Value], torch.Value], + ): + if node in complete_lower_nodes_set: + return + + if ( + _node_has_uses_by(node, complete_lower_nodes_set) + and node.kind() in self._EXCLUDED_NODE_KINDS + ): + complete_lower_nodes_set.update(_all_nodes([node])) + for input in node.inputs(): + if input in covered_bridge_values: + continue + self._partition_node( + input.node(), + complete_upper_nodes_set, + complete_lower_nodes_set, + original_graph_outputs, + covered_bridge_values, + process_bridge_value, + ) + else: + for output in node.outputs(): + if output in covered_bridge_values: + continue + if ( + _has_uses_by_nodes(output, complete_lower_nodes_set) + or output in original_graph_outputs + ): + covered_bridge_values.add(process_bridge_value(output)) + + @_beartype.beartype + def _partition_nodes( + self, + graph: torch.Graph, + pivot: int, + process_bridge_value: Callable[[torch.Value], torch.Value], + ) -> Tuple[List[torch.Node], List[torch.Node], Set[torch.Node], Set[torch.Node]]: + nodes = list(graph.nodes()) + upper_nodes = nodes[:pivot] + lower_nodes = nodes[pivot:] + # `upper_nodes` and `complete_upper_nodes_set` differs in that the latter + # recursively contains nodes in subblock of `upper_nodes`. + # The same applies for `lower_nodes` and `complete_lower_nodes_set`. + # With addition that `complete_lower_nodes_set` will include nodes that + # are determined to be copied from `upper_nodes` to `lower_nodes`. + complete_upper_nodes_set = _all_nodes(upper_nodes) + complete_lower_nodes_set = _all_nodes(lower_nodes) + original_graph_outputs = set(graph.outputs()) + # Bridge values are values produced from upper graph, and consumed + # by lower graph. These values need to be become upper graph outputs + # and lower graph inputs, to bridge the interaction. + # Start with all graph inputs marked as covered. If any graph input is + # needed by lower graph, just keep it in lower graph inputs later. 
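+        # Rough illustration (assuming no excluded ops are interleaved): for essential
+        # nodes [n0, n1, n2, n3] the pivot lands after n1, giving upper = [n0, n1] and
+        # lower = [n2, n3]; any value flowing from n1 into n2 is a bridge value.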
+ covered_bridge_values = set(graph.inputs()) + for node in upper_nodes: + self._partition_node( + node, + complete_upper_nodes_set, + complete_lower_nodes_set, + original_graph_outputs, + covered_bridge_values, + process_bridge_value, + ) + return ( + upper_nodes, + lower_nodes, + complete_upper_nodes_set, + complete_lower_nodes_set, + ) + + @_beartype.beartype + def _bridge_kwargs(self): + pt_outs = self.pt_outs + graph_outputs = list(self.graph.outputs()) + assert pt_outs is not None + assert len(graph_outputs) == len( + pt_outs + ), f"{len(graph_outputs)} vs {len(pt_outs)}\nGraph: {self.graph}" + return {v.debugName(): o for v, o in zip(graph_outputs, pt_outs)} + + @_beartype.beartype + def _args_and_params_for_partition_graph( + self, + graph: torch.Graph, + bridge_kwargs: Mapping[str, Union[_NumericType, Sequence[_NumericType]]], + full_kwargs: Mapping[str, torch.Tensor], + full_params: Mapping[str, torch.Tensor], + ): + input_names = [input.debugName() for input in graph.inputs()] + args = tuple(bridge_kwargs[k] for k in input_names if k in bridge_kwargs) + args += tuple(full_kwargs[k] for k in input_names if k in full_kwargs) + params = {k: full_params[k] for k in input_names if k in full_params} + assert len(args) + len(params) == len( + input_names + ), f"{len(args)} + {len(params)} vs {len(input_names)}: {input_names}" + return args, params + + @_beartype.beartype + def verify_export( + self, options: VerificationOptions + ) -> Tuple[Optional[AssertionError], torch.Graph, _OutputsType, _OutputsType]: + """ + Verify the export from TorchScript IR graph to ONNX. + + Export the TorchScript IR graph to ONNX, with the inputs, parameters and export + options recorded in this object. Then verify the exported ONNX graph against + the original TorchScript IR graph under the provided verification options. + + Args: + options: The verification options. + + Returns: + error: The AssertionError raised during the verification. Returns None if no + error is raised. + onnx_graph: The exported ONNX graph in TorchScript IR format. + onnx_outs: The outputs from running exported ONNX model under the onnx + backend in `options`. + pt_outs: The outputs from running the TorchScript IR graph. + """ + return verify_aten_graph( + self.graph, + input_args=self.input_args, + params_dict=self.params_dict, + export_options=self.export_options, + verification_options=options, + ) + + @_beartype.beartype + def find_mismatch( + self, + options: Optional[VerificationOptions] = None, + ): + """ + Find all mismatches between the TorchScript IR graph and the exported onnx model. + + Binary searches the model graph to find the minimal subgraph that exhibits the + mismatch. A `GraphInfo` object is created for each subgraph, recording the test + inputs and export options, as well as the validation results. + + Args: + options: The verification options. + """ + self.clear() + + if options is None: + options = VerificationOptions() + + if self.export_options.verbose: + print(self.graph) + + if len(list(self.graph.outputs())) == 0: + return + + assert len(self.input_args) + len(self.params_dict) == len( + list(self.graph.inputs()) + ), ( + f"Number of graph inputs({len(list(self.graph.inputs()))}) does not match " + f"the provided tensor arguments({len(self.input_args)} + {len(self.params_dict)})." + ) + + self.mismatch_error, self._onnx_graph, self.pt_outs, _ = self.verify_export( + options + ) + + if self.mismatch_error is None: + # No mismatch found in graph. 
+ return + + if self.essential_node_count() <= 1: + # Reached leaf node, no more partitioning. + return + + full_kwargs = { + k.debugName(): v for k, v in zip(self.graph.inputs(), self.input_args) + } + full_params = self.params_dict + + upper_graph = self._partition_upper_graph() + upper_args, upper_params = self._args_and_params_for_partition_graph( + upper_graph, {}, full_kwargs, full_params + ) + self.upper_graph_info = GraphInfo( + upper_graph, + upper_args, + upper_params, + self.export_options, + id=self.id + "0", + ) + + self.upper_graph_info.find_mismatch(options) + + bridge_kwargs = self.upper_graph_info._bridge_kwargs() + lower_graph = self._partition_lower_graph() + lower_args, lower_params = self._args_and_params_for_partition_graph( + lower_graph, bridge_kwargs, full_kwargs, full_params + ) + self.lower_graph_info = GraphInfo( + lower_graph, + lower_args, + lower_params, + self.export_options, + id=self.id + "1", + ) + + self.lower_graph_info.find_mismatch(options) + + +@_beartype.beartype +def _all_nodes(nodes: Collection[torch.Node]) -> Set[torch.Node]: + all_nodes = set(nodes) + for n in nodes: + for b in n.blocks(): + all_nodes.update(_all_nodes(list(b.nodes()))) + return all_nodes + + +@_beartype.beartype +def _has_uses_by_nodes(value: torch.Value, nodes: Collection[torch.Node]) -> bool: + if any(use.user in nodes for use in value.uses()): + return True + return False + + +@_beartype.beartype +def _node_has_uses_by(node: torch.Node, nodes: Collection[torch.Node]) -> bool: + for output in node.outputs(): + if _has_uses_by_nodes(output, nodes): + return True + return False + + +@_beartype.beartype +def _produced_by(value: torch.Value, nodes: Collection[torch.Node]) -> bool: + return value.node() in nodes + + +@_beartype.beartype +def find_mismatch( + model: Union[torch.nn.Module, torch.jit.ScriptModule], + input_args: Tuple[Any, ...], + do_constant_folding: bool = True, + training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL, + opset_version: Optional[int] = None, + keep_initializers_as_inputs: bool = True, + verbose: bool = False, + options: Optional[VerificationOptions] = None, +) -> GraphInfo: + r"""Find all mismatches between the original model and the exported model. + + Experimental. The API is subject to change. + + This tool helps debug the mismatch between the original PyTorch model and exported + ONNX model. It binary searches the model graph to find the minimal subgraph that + exhibits the mismatch. + + Args: + model: The model to be exported. + input_args: The input arguments to the model. + do_constant_folding: Same as `do_constant_folding` in :func:`torch.onnx.export`. + training: Same as `training` in :func:`torch.onnx.export`. + opset_version: Same as `opset_version` in :func:`torch.onnx.export`. + keep_initializers_as_inputs: Same as `keep_initializers_as_inputs` in :func:`torch.onnx.export`. + verbose: Same as `verbose` in :func:`torch.onnx.export`. + options: The options for the mismatch verification. + + Returns: + A GraphInfo object that contains the mismatch information. + + Example:: + + >>> import torch + >>> import torch.onnx.verification + >>> torch.manual_seed(0) + >>> opset_version = 15 + >>> # Define a custom symbolic function for aten::relu. + >>> # The custom symbolic function is incorrect, which will result in mismatches. + >>> def incorrect_relu_symbolic_function(g, self): + ... return self + >>> torch.onnx.register_custom_op_symbolic( + ... "aten::relu", + ... incorrect_relu_symbolic_function, + ... opset_version=opset_version, + ... 
) + >>> class Model(torch.nn.Module): + ... def __init__(self): + ... super().__init__() + ... self.layers = torch.nn.Sequential( + ... torch.nn.Linear(3, 4), + ... torch.nn.ReLU(), + ... torch.nn.Linear(4, 5), + ... torch.nn.ReLU(), + ... torch.nn.Linear(5, 6), + ... ) + ... def forward(self, x): + ... return self.layers(x) + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX) + >>> graph_info = torch.onnx.verification.find_mismatch( + ... Model(), + ... (torch.randn(2, 3),), + ... opset_version=opset_version, + ... ) + ===================== Mismatch info for graph partition : ====================== + ================================ Mismatch error ================================ + Tensor-likes are not close! + Mismatched elements: 12 / 12 (100.0%) + Greatest absolute difference: 0.2328854203224182 at index (1, 2) (up to 1e-07 allowed) + Greatest relative difference: 0.699536174352349 at index (1, 3) (up to 0.001 allowed) + ==================================== Tree: ===================================== + 5 X __2 X __1 ✓ + id: | id: 0 | id: 00 + | | + | |__1 X (aten::relu) + | id: 01 + | + |__3 X __1 ✓ + id: 1 | id: 10 + | + |__2 X __1 X (aten::relu) + id: 11 | id: 110 + | + |__1 ✓ + id: 111 + =========================== Mismatch leaf subgraphs: =========================== + ['01', '110'] + ============================= Mismatch node kinds: ============================= + {'aten::relu': 2} + + """ + if options is None: + options = VerificationOptions() + if opset_version is None: + opset_version = _constants.ONNX_DEFAULT_OPSET + """From aten graph, do binary search on graph partition to find operator export discrepancy.""" + # TODO: Copied from utils.py `export` until `_optimize_graph`. + if training == torch.onnx.TrainingMode.TRAINING: + model.train() + elif training == torch.onnx.TrainingMode.EVAL: + model.eval() + with torch.no_grad(): + inputs_for_export = _prepare_input_for_export(input_args, {}) + args = utils._decide_input_format(model, inputs_for_export) + + model = utils._pre_trace_quant_model(model, args) + graph, params, torch_out, module = utils._create_jit_graph(model, args) + params_dict = utils._get_named_param_dict(graph, params) + + utils._apply_friendly_debug_names(graph, params_dict) + + graph_info = GraphInfo( + graph, + input_args, + params_dict, + _experimental.ExportOptions( + do_constant_folding=do_constant_folding, + training=training, + opset_version=opset_version, + keep_initializers_as_inputs=keep_initializers_as_inputs, + verbose=verbose, + ), + ) + graph_info.find_mismatch(options) + graph_info.pretty_print_mismatch() + graph_info.pretty_print_tree() + + return graph_info
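+
+
+# Example (illustrative sketch, not executed): given a user-defined `model` and
+# `inputs`, the pieces above compose roughly as follows. `find_mismatch` builds the
+# partition tree, `all_mismatch_leaf_graph_info` returns the smallest failing
+# subgraphs, and each leaf can be dumped and re-validated as a standalone ONNX repro:
+#
+#     graph_info = find_mismatch(model, inputs)
+#     for leaf in graph_info.all_mismatch_leaf_graph_info():
+#         leaf.pretty_print_mismatch(graph=True)
+#         repro_dir = leaf.export_repro()
+#         OnnxTestCaseRepro(repro_dir).validate(VerificationOptions())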