diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..334dabbe2d8d38c6c2a575dbd9e8fbaff0865f52 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/__init__.py @@ -0,0 +1,1155 @@ +import copy +import dataclasses +import functools +import io +import json +import pathlib +import re +import sys + +import types +import warnings +import weakref +import zipfile +from collections import OrderedDict +from contextlib import contextmanager + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from unittest.mock import patch + +import sympy + +import torch +import torch._dynamo +import torch.fx +import torch.fx._pytree as fx_pytree + +import torch.utils._pytree as pytree +from torch._decomp import core_aten_decompositions, get_decompositions +from torch._dispatch.python import enable_python_dispatcher +from torch._dynamo.exc import UserError, UserErrorType +from torch._dynamo.source import ConstantSource +from torch._export.passes.collect_tracepoints_pass import CollectTracepointsPass +from torch._functorch.aot_autograd import aot_export_module, GraphSignature +from torch._functorch.eager_transforms import functionalize +from torch._guards import detect_fake_mode +from torch._ops import OpOverload +from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode +from torch.export import _create_constraint, _Dim, Constraint +from torch.export.exported_program import ( + ExportedProgram, + ModuleCallEntry, + ModuleCallSignature, + _disable_prexisiting_fake_mode, +) +from torch.export.graph_signature import ( + _sig_to_specs, + ArgumentSpec, + ConstantArgument, + ExportGraphSignature, + InputKind, + InputSpec, + OutputKind, + OutputSpec, + SymIntArgument, + TensorArgument, +) +from torch.fx import traceback as fx_traceback +from torch.fx._compatibility import compatibility +from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode +from torch.fx.experimental.symbolic_shapes import ( + ConstraintViolationError, + GuardOnDataDependentSymNode, + ShapeEnv, + StrictMinMaxConstraint, +) +from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo +from torch.utils._sympy.value_ranges import ValueRangeError, ValueRanges + +from .exported_program import ( + _create_stateful_graph_module, + _process_constraints, + CallSpec, +) +from .passes.add_runtime_assertions_for_constraints_pass import ( + _AddRuntimeAssertionsForInlineConstraintsPass, +) +from .passes.lift_constant_tensor_pass import lift_constant_tensor_pass +from .passes.remove_runtime_assertions import _RemoveRuntimeAssertionsPass +from .passes.replace_sym_size_ops_pass import _replace_sym_size_ops_pass +from .passes.replace_view_ops_with_view_copy_ops_pass import ( + ReplaceViewOpsWithViewCopyOpsPass, +) +from .wrappers import _wrap_submodules + + +def _process_dynamic_shapes( + f: Callable, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any]]] = None, +) -> Optional[List[Constraint]]: + if dynamic_shapes is None or len(dynamic_shapes) == 0: + return None + + kwargs = kwargs if kwargs is not None else {} + + from collections.abc import Mapping, Sequence + + def tree_zip(combined_args, dynamic_shapes): + if isinstance(combined_args, (tuple, list)): + if not isinstance(dynamic_shapes, Sequence): + raise UserError( + UserErrorType.INVALID_INPUT, + 
f"Expected dynamic_shapes of a {type(combined_args)} to be a Sequence, " + f"got {dynamic_shapes} instead", + ) + if len(combined_args) != len(dynamic_shapes): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected {dynamic_shapes} to have {len(combined_args)} items", + ) + for i, shape in enumerate(dynamic_shapes): + yield from tree_zip(combined_args[i], shape) + elif isinstance(combined_args, dict): + if not isinstance(dynamic_shapes, Mapping): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dynamic_shapes of a {type(combined_args)} to be a Mapping, " + f"got {dynamic_shapes} instead", + ) + if len(combined_args) != len(dynamic_shapes): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected {dynamic_shapes} to have {len(combined_args)} items", + ) + for k, shape in dynamic_shapes.items(): + yield from tree_zip(combined_args[k], shape) + elif dataclasses.is_dataclass(combined_args): + if not type(dynamic_shapes) == type(combined_args): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dynamic_shapes of a {type(combined_args)} to be a {type(combined_args)}, " + f"got {dynamic_shapes} instead", + ) + for f in dataclasses.fields(combined_args): + yield from tree_zip(getattr(combined_args, f.name), getattr(dynamic_shapes, f.name)) + elif isinstance(combined_args, torch.Tensor): + yield (combined_args, dynamic_shapes) + else: + if dynamic_shapes is not None: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Expected dynamic_shapes of a {type(combined_args)} to be None, " + f"got {dynamic_shapes} instead", + ) + + def to_constraint(dim, tensor, i): + constraint = dynamic_dim(tensor, i, debug_name=dim.__name__) + if dim.min != 2: + constraint = constraint >= dim.min + if dim.max != sys.maxsize - 1: + constraint = constraint <= dim.max + return constraint + + from collections import defaultdict + symbols = defaultdict(list) + bounds: Dict[str, Tuple[int, int]] = {} + + def check_same_bounds(dim): + if dim.__name__ in symbols: + min_, max_ = bounds[dim.__name__] + if dim.min != min_ or dim.max != max_: + this_ = _Dim.readable(dim.__name__, min_, max_) + that_ = _Dim.readable(dim.__name__, dim.min, dim.max) + raise UserError( + UserErrorType.INVALID_INPUT, + f"Found different definitions {this_} and {that_} " + f"for the same symbolic dimension {dim}!" 
+                )
+
+        else:
+            bounds[dim.__name__] = (dim.min, dim.max)
+
+    def update_symbols(tensor, shape):
+        if isinstance(shape, dict):
+            for i, dim in shape.items():
+                if isinstance(dim, _Dim):
+                    check_same_bounds(dim)
+                    symbols[dim.__name__].append(to_constraint(dim, tensor, i))
+                else:
+                    if dim is not None:
+                        raise UserError(
+                            UserErrorType.INVALID_INPUT,
+                            f"Unexpected item #{i} ({dim}) in dynamic_shape {shape} of Tensor, "
+                            "try None instead",
+                        )
+        elif isinstance(shape, (tuple, list)):
+            for i, dim in enumerate(shape):
+                if isinstance(dim, _Dim):
+                    check_same_bounds(dim)
+                    symbols[dim.__name__].append(to_constraint(dim, tensor, i))
+                else:
+                    if dim is not None:
+                        raise UserError(
+                            UserErrorType.INVALID_INPUT,
+                            f"Unexpected item #{i} ({dim}) in dynamic_shape {shape} of Tensor, "
+                            "try None instead",
+                        )
+        else:
+            if shape is not None:
+                raise UserError(
+                    UserErrorType.INVALID_INPUT,
+                    f"Unexpected dynamic_shape {shape} of Tensor, "
+                    "try None instead",
+                )
+
+    import inspect
+    if isinstance(f, ExportedProgram):
+        f = f.module()
+    signature = inspect.signature(f.forward) if isinstance(f, torch.nn.Module) else inspect.signature(f)
+    combined_args = signature.bind(*args, **kwargs).arguments
+
+    # This means the user didn't specify dynamic shapes with argument names.
+    combined_args = combined_args if isinstance(dynamic_shapes, Mapping) else list(combined_args.values())  # type: ignore[assignment]
+    for tensor, shape in tree_zip(combined_args, dynamic_shapes):
+        update_symbols(tensor, shape)
+
+    constraints = []
+    for dynamic_dims in symbols.values():
+        primary, *others = dynamic_dims
+        if others:
+            for other in others:
+                constraints.append(primary == other)
+        else:
+            constraints.append(primary)
+
+    return constraints
+
+
+def export__RC__(
+    f: Callable,
+    args: Tuple[Any, ...],
+    kwargs: Optional[Dict[str, Any]] = None,
+    *,
+    dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any]]] = None,
+    strict: bool = True,
+    preserve_module_call_signature: Tuple[str, ...] = (),
+) -> ExportedProgram:
+    """
+    API for exporting with dynamic shape specifications instead of constraints.
+    It should be considered a "release candidate" (RC), meant to replace `export`.
+
+    Here, `dynamic_shapes` is expected to be either a dict from argument names
+    of `f` to dynamic shape specifications, or a tuple whose elements correspond
+    to the original order of the arguments defined in the function signature,
+    as follows:
+    - The dynamic shape of a tensor argument can be specified as:
+      - Either a dict from dynamic dimension indices to Dim types. It is not
+        required to include static dimension indices in this dict, but when
+        they are included, they should be mapped to None.
+      - Or a tuple of Dim types or None. The Dim types correspond to dynamic
+        dimensions, whereas static dimensions are denoted by None.
+    - Arguments that are dicts or tuples of tensors are recursively specified
+      by using mappings or sequences of contained specifications.
+
+    See `export` for documentation of `f`, `args`, `kwargs` and the return value.
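+
+    For example, the following sketch marks the first dimension of `x` as
+    dynamic while keeping `mask` fully static (`m`, `x` and `mask` are
+    hypothetical names used for illustration):
+
+    ```
+    from torch.export import Dim
+
+    batch = Dim("batch")
+    ep = export__RC__(m, (x, mask), dynamic_shapes={"x": {0: batch}, "mask": None})
+    ```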
+ """ + constraints = _process_dynamic_shapes(f, args, kwargs, dynamic_shapes) + return _export( + f, + args, + kwargs, + constraints=constraints, + strict=strict, + preserve_module_call_signature=preserve_module_call_signature + ) + + +def dynamic_dim(t: torch.Tensor, index: int, debug_name: Optional[str] = None): + if not isinstance(t, torch.Tensor): + raise UserError( + UserErrorType.DYNAMIC_DIM, + f"Expected tensor as input to dynamic_dim but got {type(t)}" + ) + + if t.dim() < 1: + raise UserError( + UserErrorType.DYNAMIC_DIM, + "Cannot mark 0-dimension tensors to be dynamic" + ) + + if index >= t.dim(): + raise UserError( + UserErrorType.DYNAMIC_DIM, + f"Expected the dimension passed to dynamic_dim to be in the range [0:{t.dim()-1}]" + f" but got {index}, which is out of bounds for the given tensor." + ) + + return _create_constraint( + weakref.ref(t), + id(t), + index, + StrictMinMaxConstraint( + vr=ValueRanges(lower=2, upper=sympy.oo), warn_only=False + ), + debug_name=debug_name, + ) + + +@dataclasses.dataclass +class ExportDynamoConfig: + """ + Manage Export-specific configurations of Dynamo. + """ + allow_rnn: bool = True + +DEFAULT_EXPORT_DYNAMO_CONFIG = ExportDynamoConfig() + + +DECOMP_TABLE = core_aten_decompositions() + + +# TODO(zhxchen17) This is not needed if we output pre_dispatch graph upfront from export(). +@contextmanager +def _disable_decomp_table(): + global DECOMP_TABLE + prev, DECOMP_TABLE = DECOMP_TABLE, {} + try: + yield + finally: + DECOMP_TABLE = prev + + +@compatibility(is_backward_compatible=False) +def capture_pre_autograd_graph( + f: Callable, + args: Tuple[Any], + kwargs: Optional[Dict[str, Any]] = None, + constraints: Optional[List[Constraint]] = None, +) -> torch.nn.Module: + """ + A helper function that is intended to trace a module before any pre-autograd + decomposition is run. The produced module will be "non-functional" and + composed of aten operators. Later this API will be deleted in favor of more general + torch.export API. + + Args: + f: A callable to be traced + + args: example positional inputs. + + kwargs: optional example keyword inputs. + + constraints: A optional list of constraints on the dynamic arguments specifying + their possible range of their shapes + + Returns: + An nn.Module containing the traced method. 
+ + """ + + decomp_table = { + torch.ops.aten.dropout.default: torch.ops.aten.dropout.default.decompose, + torch.ops.aten.batch_norm.default: torch.ops.aten.batch_norm.default.decompose, + torch.ops.aten._batch_norm_impl_index.default: torch.ops.aten._batch_norm_impl_index.default.decompose, + torch.ops.aten.native_batch_norm.default: torch.ops.aten.native_batch_norm.default.decompose, + } + + if kwargs is None: + kwargs = {} + + with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)): + m = torch._dynamo.export( + f, + constraints=constraints, + assume_static_by_default=True, + tracing_mode="symbolic", + decomposition_table=decomp_table, + pre_dispatch=True, + aten_graph=True, + )( + *args, + **kwargs, + )[0] + + def _train(self, mode: bool = True): + raise NotImplementedError("Calling train() is not supported yet.") + + def _eval(self, mode: bool = True): + raise NotImplementedError("Calling eval() is not supported yet.") + + _, _, _, fake_mode = _convert_input_to_fake(m, args, kwargs) + + m.meta["inline_constraints"] = { + k: v + for k, v in fake_mode.shape_env.runtime_var_to_range.items() + if re.match(r"^[if]\d+$", str(k)) + } + + flat_args, _ = pytree.tree_flatten((args, kwargs or {})) + range_constraints, equality_constraints = _process_constraints(m, 0, flat_args) + unlifted_m = _create_stateful_graph_module( + m, + range_constraints=range_constraints, + equality_constraints=equality_constraints, + ) + unlifted_m.train = types.MethodType(_train, m) # type: ignore[method-assign] + unlifted_m.eval = types.MethodType(_eval, m) # type: ignore[method-assign] + return unlifted_m + + +def _convert_input_to_fake(gm, args, kwargs): + if len(args) == 0 and len(kwargs) == 0 and len(dict(gm.named_parameters())) == 0 and len(dict(gm.named_buffers())) == 0: + return [], {}, {}, None + + fake_inps: List[torch.Tensor] = [] + fake_mode = None + for node in gm.graph.nodes: + if node.op == "placeholder" and "val" in node.meta: + fake_val = node.meta["val"] + if fake_val is not None and isinstance(fake_val, torch.Tensor): + fake_inps.append(fake_val) + + if detected_fake_mode := detect_fake_mode(fake_inps): + fake_mode = detected_fake_mode + + assert fake_mode is not None, "Cannot find fake_mode attatched to the graph's placeholders." + + count = 0 + + def convert_to_fake(x): + nonlocal count + val = fake_inps[count] + count += 1 + return val + + fake_args = pytree.tree_map_only(torch.Tensor, convert_to_fake, args) + # TODO properly use the cached fake tensor + fake_kwargs = pytree.tree_map_only(torch.Tensor, fake_mode.from_tensor, kwargs) + fake_params_buffers = pytree.tree_map_only(torch.Tensor, + functools.partial(fake_mode.from_tensor, static_shapes=True), + {**dict(gm.named_parameters(remove_duplicate=False)), + **dict(gm.named_buffers(remove_duplicate=False))}) + return fake_args, fake_kwargs, fake_params_buffers, fake_mode + + +def _replace_param_buffer_names(param_buffer_table, sig): + for spec in sig.input_specs: + spec.target = param_buffer_table.get(spec.target, spec.target) + for spec in sig.output_specs: + spec.target = param_buffer_table.get(spec.target, spec.target) + + +def _normalize_nn_module_stack(gm_torch_level, root_cls): + # Append a root module to every nn_module_stack. 
+    root = "L['self']"
+    root_key = re.sub(r'[^a-zA-Z0-9]', '_', root)
+    for gm in gm_torch_level.modules():
+        if not isinstance(gm, torch.fx.GraphModule):
+            continue
+        for node in gm.graph.nodes:
+            if node.op in ["placeholder", "output"]:
+                continue
+            add_root = True
+            if nn_module_stack := node.meta.get("nn_module_stack", {}):
+                path, ty = next(iter(nn_module_stack.values()))
+                assert issubclass(ty, torch.nn.Module)
+                # TODO Figure out why sometimes we have root sometimes we don't.
+                if path == root and ty is root_cls:
+                    add_root = False
+            if add_root:
+                def normalize_path(path):
+                    try:
+                        parts = []
+
+                        class Path:
+                            def __getattr__(self, name):
+                                parts.append(name)
+                                return self
+
+                            def __getitem__(self, idx):
+                                parts.append(str(idx))
+                                return self
+
+                        eval(path, {"L": {"self": Path()}})
+                        return ".".join(parts)
+                    except Exception:  # TODO(zhxchen17) Remove this.
+                        return path
+
+                nn_module_stack = {root_key: (root, root_cls), **nn_module_stack}
+                node.meta["nn_module_stack"] = {
+                    key: (normalize_path(path), ty)
+                    for key, (path, ty) in nn_module_stack.items()
+                }
+
+def _export_to_torch_ir(
+    f: Callable,
+    args: Tuple[Any, ...],
+    kwargs: Optional[Dict[str, Any]] = None,
+    constraints: Optional[List[Constraint]] = None,
+    *,
+    preserve_module_call_signature: Tuple[str, ...] = (),
+    disable_constraint_solver: bool = False,
+) -> torch.fx.GraphModule:
+    """
+    Traces either an nn.Module's forward function or just a callable with PyTorch
+    operations inside and produces a torch.fx.GraphModule in torch IR.
+    """
+
+    constraints = constraints or []
+    kwargs = kwargs or {}
+
+    if not isinstance(args, tuple):
+        raise UserError(UserErrorType.INVALID_INPUT,
+                        f"Expecting `args` to be a tuple of example positional inputs, got {type(args)}")
+
+    # We convert to nn.Module because __call__ of ExportedProgram
+    # is untraceable right now.
+    if isinstance(f, ExportedProgram):
+        f = f.module()
+
+    with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)):
+        try:
+            module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {}
+            with _wrap_submodules(f, preserve_module_call_signature, module_call_specs):
+                gm_torch_level, _ = torch._dynamo.export(
+                    f,
+                    constraints=constraints,
+                    assume_static_by_default=True,
+                    tracing_mode="symbolic",
+                    disable_constraint_solver=disable_constraint_solver,
+                )(
+                    *args,
+                    **kwargs,
+                )
+        except (ConstraintViolationError, ValueRangeError) as e:
+            raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e))  # noqa: TRY200
+        except GuardOnDataDependentSymNode as e:
+            raise UserError(  # noqa: TRY200
+                UserErrorType.ANTI_PATTERN,
+                f"Consider annotating your code using torch._constrain_as_*(). {str(e)}",
+                case_name="constrain_as_size_example",
+            )
+
+    gm_torch_level.meta["module_call_specs"] = module_call_specs
+    return gm_torch_level
+
+
+def export(
+    f: Callable,
+    args: Tuple[Any, ...],
+    kwargs: Optional[Dict[str, Any]] = None,
+    constraints: Optional[List[Constraint]] = None,
+    *,
+    strict: bool = True,
+    preserve_module_call_signature: Tuple[str, ...] = (),
+) -> ExportedProgram:
+
+    if constraints is not None:
+        warnings.warn(
+            "Using `constraints` to specify dynamic shapes for export is DEPRECATED "
+            "and will not be supported in the future. 
" + "Please use `dynamic_shapes` instead (see docs on `torch.export.export`).", + DeprecationWarning, + stacklevel=2, + ) + return _export( + f, + args, + kwargs, + constraints, + strict=strict, + preserve_module_call_signature=preserve_module_call_signature, + ) + + +def _unlift_user_inputs_to_buffers( + gm_torch_level: torch.fx.GraphModule, + aot_export_args +) -> List[str]: + flat_args = pytree.tree_leaves(aot_export_args) + user_input_names = [] + with gm_torch_level.graph.inserting_before(): + for i, (arg, node) in enumerate(zip(flat_args, gm_torch_level.graph.nodes)): + assert node.op == "placeholder" + user_input_names.append(node.name) + if isinstance(arg, torch.Tensor): + assert not hasattr(gm_torch_level, node.name) + gm_torch_level.register_buffer(node.name, arg) + get_attr = gm_torch_level.graph.get_attr(node.name) + node.replace_all_uses_with(get_attr) + get_attr.meta = copy.copy(node.meta) + + for node in list(gm_torch_level.graph.nodes): + if node.op == "placeholder": + assert len(node.users) == 0 + gm_torch_level.graph.erase_node(node) + gm_torch_level.recompile() + return user_input_names + + +def _lift_buffers_to_user_inputs( + gm: torch.fx.GraphModule, + graph_signature: GraphSignature, + user_input_names: List[str] +) -> Dict[str, str]: + assert len(graph_signature.user_inputs) == 0 + assert graph_signature.backward_signature is None + names = set(user_input_names) + + placeholders = [node for node in gm.graph.nodes if node.op == "placeholder"] + # user inputs are always added in the end + start = len(graph_signature.parameters) + end = start + len(graph_signature.buffers) + buffer_nodes = placeholders[start:end] + last_placeholder_node = placeholders[-1] if len(placeholders) > 0 else None + old_nodes: Dict[str, torch.fx.Node] = {} + for node in buffer_nodes: + buffer_name = graph_signature.inputs_to_buffers[node.name] + if buffer_name not in names: + continue + old_nodes[buffer_name] = node + replaces = {} + new_node_names: Dict[str, str] = {} + with gm.graph.inserting_after(last_placeholder_node): + for name in reversed(user_input_names): + new_node = gm.graph.placeholder(name) + new_node.target = new_node.name + new_node_names[name] = new_node.name + if name in old_nodes: + old_node = old_nodes[name] + new_node.meta = copy.copy(old_node.meta) + old_node.replace_all_uses_with(new_node) + replaces[old_node.name] = new_node.name + new_node_names = dict(reversed(new_node_names.items())) + for old_node in old_nodes.values(): + gm.graph.erase_node(old_node) + + gm.recompile() + + graph_signature.buffers = [b for b in graph_signature.buffers if b not in names] + graph_signature.inputs_to_buffers = { + i: b for i, b in graph_signature.inputs_to_buffers.items() if b not in names + } + user_inputs_to_mutate = { + o: b for o, b in graph_signature.buffers_to_mutate.items() if b in names + } + graph_signature.buffers_to_mutate = { + o: b for o, b in graph_signature.buffers_to_mutate.items() if b not in names + } + graph_signature.user_inputs.extend(new_node_names.values()) # type: ignore[arg-type] + graph_signature.user_outputs = [ + replaces[o] if o in replaces else o for o in graph_signature.user_outputs + ] + return user_inputs_to_mutate # type: ignore[return-value] + + +def _export_non_strict( + mod, + fake_args, + fake_kwargs, + fake_params_buffers, + *, + transform=lambda x: x # TODO(zhxchen17) Revisit if this is needed later. 
+): + # This _reparametrize_module makes sure inputs and module.params/buffers have the same fake_mode, + # otherwise aot_export_module will error out because it sees a mix of fake_modes. + # And we want aot_export_module to use the fake_tensor mode in dynamo to keep the pipeline easy to reason about. + with torch.nn.utils.stateless._reparametrize_module(mod, fake_params_buffers): + gm, graph_signature = transform(aot_export_module)( + mod, + (*fake_args, *fake_kwargs.values()), + trace_joint=False + ) + + # NOTE: aot_export adds symint metadata for placeholders with int values; + # since these become specialized, we replace such metadata with the original values + flat_args = pytree.tree_leaves((fake_args, fake_kwargs)) + index = 0 + total_param_buffers = len(graph_signature.parameters) + len(graph_signature.buffers) + for node in gm.graph.nodes: + if node.op == "placeholder": + if index >= total_param_buffers: + user_arg = flat_args[index - total_param_buffers] + if not isinstance(user_arg, torch.Tensor): + node.meta["val"] = user_arg + index += 1 + + is_joint = graph_signature.backward_signature is not None + + def make_argument_spec(node) -> ArgumentSpec: + assert "val" in node.meta, f"{node} has no 'val' metadata field" + val = node.meta["val"] + if isinstance(val, FakeTensor): + return TensorArgument(name=node.name) + elif isinstance(val, torch.SymInt): + return SymIntArgument(name=node.name) + else: + return ConstantArgument(value=val) + + input_specs, output_specs = _sig_to_specs( + user_inputs=set(graph_signature.user_inputs), + inputs_to_parameters=graph_signature.inputs_to_parameters, # type: ignore[arg-type] + inputs_to_buffers=graph_signature.inputs_to_buffers, # type: ignore[arg-type] + user_outputs=set(graph_signature.user_outputs), # type: ignore[arg-type] + buffer_mutations=graph_signature.buffers_to_mutate, # type: ignore[arg-type] + user_input_mutations=gm.meta.get("user_inputs_to_mutate", {}), # type: ignore[arg-type] + grad_params=graph_signature.backward_signature.gradients_to_parameters if is_joint else {}, # type: ignore[arg-type, union-attr] + grad_user_inputs=graph_signature.backward_signature.gradients_to_user_inputs if is_joint else {}, # type: ignore[arg-type, union-attr] + loss_output=graph_signature.backward_signature.loss_output if is_joint else None, # type: ignore[arg-type, union-attr] + inputs=[make_argument_spec(node) for node in gm.graph.nodes if node.op == "placeholder"], + outputs=[make_argument_spec(node) for node in pytree.tree_leaves(next(iter(reversed(gm.graph.nodes))).args)], + ) + export_graph_signature = ExportGraphSignature(input_specs=input_specs, output_specs=output_specs) + + tensor_constants = lift_constant_tensor_pass(gm, export_graph_signature) + + @dataclasses.dataclass + class _ExportedProgramNonStrict: + gm: torch.fx.GraphModule + sig: ExportGraphSignature + tensor_constants: Dict[str, torch.Tensor] + + return _ExportedProgramNonStrict( + gm, + export_graph_signature, + tensor_constants, + ) + + +def _get_params_buffers(mod: torch.nn.Module) -> Dict[str, torch.Tensor]: + params_buffers: Dict[str, torch.Tensor] = {} + for name, param in mod.named_parameters(remove_duplicate=False): + params_buffers[name] = param + + for name, buffer in mod.named_buffers(remove_duplicate=False): + params_buffers[name] = buffer + return params_buffers + + +@_disable_prexisiting_fake_mode +def _export( + f: Callable, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + constraints: Optional[List[Constraint]] = None, + *, + strict: bool = 
True, + preserve_module_call_signature: Tuple[str, ...] = (), +) -> ExportedProgram: + """ + Traces either an nn.Module's forward function or just a callable with PyTorch + operations inside and produce a ExportedProgram. + + Args: + m: the `nn.Module` or callable to trace. + + args: example positional inputs. + + kwargs: optional example keyword inputs. + + constraints: A optional list of constraints on the dynamic arguments specifying + their possible range of their shapes + + preserve_module_call_signature: A list of submodule paths for which the original + calling conventions are preserved as metadata. + + Returns: + An ExportedProgram containing the traced method. + """ + constraints = constraints or [] + kwargs = kwargs or {} + + if not strict: + assert isinstance(f, torch.nn.Module) + assert len(preserve_module_call_signature) == 0 + assert len(constraints) == 0, "dynamic shape NYI" + assert len(kwargs) == 0, "keyword arguments NYI" + out_spec = None + + def _tuplify_outputs(aot_export): + def _aot_export_non_strict(mod, args, **kwargs): + class Wrapper(torch.nn.Module): + def __init__(self, mod): + super().__init__() + self._export_root = mod + + def forward(self, *args, **kwargs): + nonlocal out_spec + flat_outs, out_spec = pytree.tree_flatten(self._export_root(*args, **kwargs)) + return tuple(flat_outs) + + gm, sig = aot_export(Wrapper(mod), args, **kwargs) + + def strip_root(x): + return x[len('_export_root.'):] if x.startswith('_export_root.') else x + + sig.parameters = pytree.tree_map(strip_root, sig.parameters) + sig.buffers = pytree.tree_map(strip_root, sig.buffers) + sig.inputs_to_buffers = pytree.tree_map(strip_root, sig.inputs_to_buffers) + sig.inputs_to_parameters = pytree.tree_map(strip_root, sig.inputs_to_parameters) + sig.buffers_to_mutate = pytree.tree_map(strip_root, sig.buffers_to_mutate) + return gm, sig + return _aot_export_non_strict + ep_non_strict = _export_non_strict(f, args, {}, f.state_dict(), transform=_tuplify_outputs) + assert out_spec is not None + return ExportedProgram( + ep_non_strict.gm, + ep_non_strict.gm.graph, + ep_non_strict.sig, + _get_params_buffers(f), + {}, + [], + [ModuleCallEntry("", ModuleCallSignature([], [], pytree.tree_flatten((args, {}))[1], out_spec))], + (args, kwargs), + tensor_constants=ep_non_strict.tensor_constants, + ) + + + gm_torch_level = _export_to_torch_ir( + f, + args, + kwargs, + constraints, + preserve_module_call_signature=preserve_module_call_signature, + ) + + params_buffers = _get_params_buffers(gm_torch_level) + + # We detect the fake_mode by looking at gm_torch_level's placeholders, this is the fake_mode created in dynamo. + fake_args, fake_kwargs, fake_params_buffers, dynamo_fake_mode = _convert_input_to_fake(gm_torch_level, args, kwargs) + + # First, we want to pass through the graph to try populating + # val field for getattr if there is anything missing. + # THis can happen when quantization adds extra params and forgets + # to update "val" + for node in gm_torch_level.graph.nodes: + if node.op == "get_attr" and "val" not in node.meta: + attr = getattr(gm_torch_level, node.target) + # Checks if it is not a HigherOrderOp branch or a module + if not isinstance(attr, torch.nn.Module): + assert dynamo_fake_mode is not None, ( + "Cannot find dynamo_fake_mode. This could be due to the exported graph module have no placeholders." 
+ ) + node.meta["val"] = dynamo_fake_mode.from_tensor(attr, static_shapes=True) + + # When aot_export lifts the params, we lose the nn_module_stack + # and source_fn from the param nodes as they are treated as fresh inputs + # Therefore, we manually extract them before calling into aot_export + params_buffers_to_node_meta = {} + for node in gm_torch_level.graph.nodes: + target = node.target + meta = node.meta + if node.op == "call_module": + submodule = getattr(gm_torch_level, target) + if isinstance(submodule, torch.nn.Module): + for name, _ in submodule.named_parameters(recurse=True, remove_duplicate=False): + params_buffers_to_node_meta[target + "." + name] = meta + + for name, _ in submodule.named_buffers(recurse=True, remove_duplicate=False): + params_buffers_to_node_meta[target + "." + name] = meta + + if node.op == "get_attr": + submodule = getattr(gm_torch_level, target) + if not isinstance(submodule, torch.fx.GraphModule): + params_buffers_to_node_meta[target] = meta + + # If the call_function uses param as input, we also need to update params' meta + # with this call_function node's meta. + # This is basically the same flow as torch.fx.traceback.preserve_meta() + if node.op == "call_function" and not isinstance(node.target, torch._ops.HigherOrderOperator): + for arg in node._input_nodes: + if arg.op == "get_attr": + for entry in torch.fx.proxy._COPY_META_FIELDS: + if entry in meta: + params_buffers_to_node_meta[arg.target][entry] = meta[entry] + + # Fix the graph output signature to be tuple if scalar + out_spec = orig_out_spec = gm_torch_level._out_spec + assert out_spec is not None + # aot_export expect the return type to always be a tuple. + if out_spec.type not in (list, tuple): + out_spec = pytree.TreeSpec(tuple, None, [out_spec]) + + orig_args = gm_torch_level.graph._codegen.pytree_info.orig_args # type: ignore[attr-defined] + + gm_torch_level.graph._codegen = _PyTreeCodeGen( + _PyTreeInfo( + orig_args, + gm_torch_level._in_spec, + out_spec, + ) + ) + gm_torch_level.recompile() + + param_buffer_table: Dict[str, str] = {} + if isinstance(f, torch.nn.Module): + param_lookup: Dict[int, List[str]] = {} + buffer_lookup: Dict[int, List[str]] = {} + for name, param in f.named_parameters(remove_duplicate=False): + param_lookup.setdefault(id(param), []).append(name) + for name, buffer in f.named_buffers(remove_duplicate=False): + buffer_lookup.setdefault(id(buffer), []).append(name) + for dynamo_name, dynamo_param in gm_torch_level.named_parameters(remove_duplicate=False): + assert dynamo_name not in param_buffer_table + if id(dynamo_param) in param_lookup: + param_buffer_table[dynamo_name] = param_lookup[id(dynamo_param)].pop() + + for dynamo_name, dynamo_buffer in gm_torch_level.named_buffers(remove_duplicate=False): + assert dynamo_name not in param_buffer_table + if id(dynamo_buffer) in buffer_lookup: + param_buffer_table[dynamo_name] = buffer_lookup[id(dynamo_buffer)].pop() + + if isinstance(f, torch.nn.Module): + _normalize_nn_module_stack(gm_torch_level, type(f)) + + def _process_user_inputs(aot_export): + def _aot_export_strict(gm_torch_level: torch.fx.GraphModule, args, **kwargs): + user_input_names = _unlift_user_inputs_to_buffers(gm_torch_level, args) + gm, graph_signature = aot_export(gm_torch_level, (), **kwargs) + user_inputs_to_mutate = _lift_buffers_to_user_inputs(gm, graph_signature, user_input_names) + # TODO unfortunately preserving graph-level metadata is not + # working well with aot_export. So we manually copy it. + # (The node-level meta is addressed above.) 
+ gm.meta.update(gm_torch_level.meta) + assert "user_inputs_to_mutate" not in gm.meta + gm.meta["user_inputs_to_mutate"] = user_inputs_to_mutate + return gm, graph_signature + + return _aot_export_strict + + # Note: aot_export_module doesn't accept kwargs, we'd like to reorder the kwargs as an OrderedDict + # to follow the order in orig_args and correctly call module + ep_non_strict = _export_non_strict( + gm_torch_level, + fake_args, + _reorder_kwargs_by_names(orig_args, fake_args, fake_kwargs), + fake_params_buffers, + transform=_process_user_inputs + ) + + gm = ep_non_strict.gm + export_graph_signature = ep_non_strict.sig + tensor_constants = ep_non_strict.tensor_constants + + # After aot_export, set the param/buffer metadata back into placeholders + # Technically, users can still construct this data from param names + # without relying on this metadata + for node in gm.graph.nodes: + if node.op == "placeholder": + if node.target in export_graph_signature.inputs_to_parameters: + param_name = export_graph_signature.inputs_to_parameters[node.target] + if param_name in params_buffers_to_node_meta: + for k, v in params_buffers_to_node_meta[param_name].items(): + node.meta[k] = v + if node.target in export_graph_signature.inputs_to_buffers: + buffer_name = export_graph_signature.inputs_to_buffers[node.target] + if buffer_name in params_buffers_to_node_meta: + for k, v in params_buffers_to_node_meta[buffer_name].items(): + node.meta[k] = v + + # The unbacked symint symbols are updated in aot_export + # so we serialize them here instead of inside dynamo + + # dynamo_fake_mode can be None if there's no placeholder in gm_torch_level + if dynamo_fake_mode: + gm.meta["inline_constraints"] = { + k: v + for k, v in dynamo_fake_mode.shape_env.runtime_var_to_range.items() + if re.match(r"^[if]\d+$", str(k)) + } + + num_lifted = next( + (i for i, s in enumerate(export_graph_signature.input_specs) if s.kind == InputKind.USER_INPUT), 0 + ) + flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs)) + range_constraints, equality_constraints = _process_constraints( + gm, + num_lifted, + flat_args, + ) + + if isinstance(f, torch.nn.Module): + _replace_param_buffer_names(param_buffer_table, export_graph_signature) + params_buffers = {param_buffer_table.get(name, name): tensor for name, tensor in params_buffers.items()} + + module_call_signatures = { + fqn: ModuleCallSignature(inputs=[], outputs=[], **specs) + for fqn, specs in gm_torch_level.meta["module_call_specs"].items() + } + + if len(preserve_module_call_signature) > 0: + res = CollectTracepointsPass(module_call_signatures, export_graph_signature)(gm) + assert res is not None + gm = res.graph_module + + assert orig_out_spec is not None + exported_program = ExportedProgram( + gm, + gm.graph, + export_graph_signature, + # TODO(zhxchen17) Return empty state_dict for functions. 
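+        # Positional arguments below are: state_dict, range_constraints,
+        # equality_constraints, module_call_graph, example_inputs.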
+ params_buffers, + range_constraints, + equality_constraints, + [ModuleCallEntry("", ModuleCallSignature(inputs=[], outputs=[], in_spec=orig_in_spec, out_spec=orig_out_spec))] + + [ModuleCallEntry(fqn, sig) for fqn, sig in module_call_signatures.items()], + (args, kwargs), + tensor_constants=tensor_constants, + ) + + if len(range_constraints) > 0 or len(equality_constraints) > 0: + exported_program = exported_program._transform( + _AddRuntimeAssertionsForInlineConstraintsPass(range_constraints, equality_constraints) + ) + + return exported_program + + +def _reorder_kwargs_by_names(arg_names: List[str], args: Tuple[Any], kwargs: Dict[str, Any]): + assert len(arg_names) == len(args) + len(kwargs), ( + f"Total number of arg names is expected to be {len(arg_names)} " + f"but got {len(args)} positional args, {len(kwargs)} kwargs." + ) + return {kw_name: kwargs[kw_name] for kw_name in arg_names[len(args):]} + + +def save( + ep: ExportedProgram, + f: Union[str, pathlib.Path, io.BytesIO], + *, + extra_files: Optional[Dict[str, Any]] = None, + opset_version: Optional[Dict[str, int]] = None, +) -> None: + from .serde.serialize import serialize, SerializedArtifact + from .serde.schema import SCHEMA_VERSION + artifact: SerializedArtifact = serialize(ep, opset_version) + + if isinstance(f, (str, pathlib.Path)): + f = str(f) + + with zipfile.ZipFile(f, 'w') as zipf: + # Save every field the SerializedArtifact to a file + for field in dataclasses.fields(artifact): + field_name = field.name + serialized_field = getattr(artifact, field_name) + zipf.writestr(f"serialized_{field_name}.json", serialized_field) + + zipf.writestr('version', str(SCHEMA_VERSION)) + + # Add extra files if provided + if extra_files: + for extra_file_name, content in extra_files.items(): + encoded_content = content.encode('utf-8') + zipf.writestr(f"extra_files/{extra_file_name}", encoded_content) + + +def load( + f: Union[str, pathlib.Path, io.BytesIO], + *, + extra_files: Optional[Dict[str, Any]] = None, + expected_opset_version: Optional[Dict[str, int]] = None, +) -> ExportedProgram: + if isinstance(f, (str, pathlib.Path)): + f = str(f) + + with zipfile.ZipFile(f, 'r') as zipf: + # Check the version + version = int(zipf.read('version')) + from .serde.schema import SCHEMA_VERSION + + if version != SCHEMA_VERSION: + raise RuntimeError( + f"Serialized version {version} does not match our current " + f"schema version {SCHEMA_VERSION}." 
+ ) + + from .serde.serialize import deserialize, SerializedArtifact + + # Load serialized_ep and serialized_state_dict from the zip file + artifact: SerializedArtifact = SerializedArtifact( + **{ + field.name: zipf.read(f"serialized_{field.name}.json") + for field in dataclasses.fields(SerializedArtifact) + } + ) + + # Deserialize ExportedProgram + ep = deserialize(artifact) + + # Populate extra_files map + if extra_files is not None: + for filename in extra_files.keys(): + extra_files[filename] = zipf.read(f"extra_files/{filename}").decode('utf-8') + + return ep + + +def aot_compile( + f: Callable, + args: Tuple[Any], + kwargs: Optional[Dict[str, Any]] = None, + *, + constraints: Optional[List[Constraint]] = None, + dynamic_shapes: Optional[Dict[str, Any]] = None, + options: Optional[Dict[str, Any]] = None, + remove_runtime_assertions: bool = False, + disable_constraint_solver: bool = False, +) -> str: + """ + Note: this function is not stable yet + + Traces either an nn.Module's forward function or just a callable with PyTorch + operations inside, generates executable cpp code from the program, and returns + the path to the generated shared library + + Args: + f: the `nn.Module` or callable to trace. + + args: example positional inputs. + + kwargs: optional example keyword inputs. + + constraints: A optional list of constraints on the dynamic arguments specifying + their possible range of their shapes + + dynamic_shapes: An experimental new feature designed to subsume ``constraints``. + A dict mapping argument names of ``f`` to their dynamic shape + specifications, as follows. Dynamic shape specifications can be a + dict from dynamic dimensions to ``Dim`` types, or a tuple/list of + ``Optional[Dim]`` corresponding to each input dimension. + + options: A dictionary of options to control inductor + + disable_constraint_solver: Whether the dim constraint solver must be disabled. + + Returns: + Path to the generated shared library + """ + if constraints is not None: + warnings.warn( + "The constraints field is deprecated. " + "Please use dynamic_shapes instead." + ) + + from torch._inductor.decomposition import select_decomp_table + + if constraints is None: + constraints = _process_dynamic_shapes(f, args, kwargs, dynamic_shapes) + + # We want to export to Torch IR here to utilize the pre_grad passes in + # inductor, which run on Torch IR. + gm = _export_to_torch_ir( + f, + args, + kwargs, + constraints, + disable_constraint_solver=disable_constraint_solver + ) + flat_example_inputs = pytree.arg_tree_leaves(*args, **(kwargs or {})) + + with torch.no_grad(): + so_path = torch._inductor.aot_compile(gm, flat_example_inputs, options) # type: ignore[arg-type] + + return so_path diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10a55772ab58b21573a6eba0356ddd3080164ac7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
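The `save`/`load` pair above round-trips an `ExportedProgram` through a zip archive: each `SerializedArtifact` field is stored as `serialized_<field>.json`, a `version` entry guards the schema, and `extra_files/` holds user payloads. A minimal round-trip sketch, assuming `ep` is an already-exported program (names are illustrative):

```python
import io
from torch._export import save, load

buffer = io.BytesIO()
save(ep, buffer, extra_files={"note.txt": "exported with defaults"})

buffer.seek(0)
extras = {"note.txt": ""}          # keys select which extra files to read back
ep2 = load(buffer, extra_files=extras)
assert extras["note.txt"] == "exported with defaults"
```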
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60ef1fef88ac045b797614d519040a11e4c6a2f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b52cf345f74cede8ad40b8a64652a249c667cff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61f6f96fc4a44aa88ebf67b31173613df719dea3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..451d99370b28f030733f0e16f36d2805892e96f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/case.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/case.py new file mode 100644 index 0000000000000000000000000000000000000000..ee6d011e7bf95b56241d78366840ebe6045de42c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/case.py @@ -0,0 +1,188 @@ +import inspect +import re +import string +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import torch + +_TAGS: Dict[str, Dict[str, Any]] = { + "torch": { + "cond": {}, + "dynamic-shape": {}, + "escape-hatch": {}, + "map": {}, + "dynamic-value": {}, + "operator": {}, + "mutation": {}, + }, + "python": { + "assert": {}, + "builtin": {}, + "closure": {}, + "context-manager": {}, + "control-flow": {}, + "data-structure": {}, + "standard-library": {}, + "object-model": {}, + }, +} + + +class SupportLevel(Enum): + """ + Indicates at what stage the feature + used in the example is handled in export. 
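+    `SUPPORTED` cases are expected to export successfully; `NOT_SUPPORTED_YET`
+    marks known gaps that the corresponding example documents.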
+ """ + + SUPPORTED = 1 + NOT_SUPPORTED_YET = 0 + + +class ExportArgs: + __slots__ = ("args", "kwargs") + + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + +InputsType = Union[Tuple[Any, ...], ExportArgs] + + +def check_inputs_type(x): + if not isinstance(x, (ExportArgs, tuple)): + raise ValueError( + f"Expecting inputs type to be either a tuple, or ExportArgs, got: {type(x)}" + ) + + +def _validate_tag(tag: str): + parts = tag.split(".") + t = _TAGS + for part in parts: + assert set(part) <= set( + string.ascii_lowercase + "-" + ), f"Tag contains invalid characters: {part}" + if part in t: + t = t[part] + else: + raise ValueError(f"Tag {tag} is not found in registered tags.") + + +@dataclass(frozen=True) +class ExportCase: + example_inputs: InputsType + description: str # A description of the use case. + model: torch.nn.Module + name: str + extra_inputs: Optional[InputsType] = None # For testing graph generalization. + # Tags associated with the use case. (e.g dynamic-shape, escape-hatch) + tags: Set[str] = field(default_factory=set) + support_level: SupportLevel = SupportLevel.SUPPORTED + dynamic_shapes: Optional[Dict[str, Any]] = None + + def __post_init__(self): + check_inputs_type(self.example_inputs) + if self.extra_inputs is not None: + check_inputs_type(self.extra_inputs) + + for tag in self.tags: + _validate_tag(tag) + + if not isinstance(self.description, str) or len(self.description) == 0: + raise ValueError(f'Invalid description: "{self.description}"') + + +_EXAMPLE_CASES: Dict[str, ExportCase] = {} +_MODULES = set() +_EXAMPLE_CONFLICT_CASES = {} +_EXAMPLE_REWRITE_CASES: Dict[str, List[ExportCase]] = {} + + +def register_db_case(case: ExportCase) -> None: + """ + Registers a user provided ExportCase into example bank. + """ + if case.name in _EXAMPLE_CASES: + if case.name not in _EXAMPLE_CONFLICT_CASES: + _EXAMPLE_CONFLICT_CASES[case.name] = [_EXAMPLE_CASES[case.name]] + _EXAMPLE_CONFLICT_CASES[case.name].append(case) + return + + _EXAMPLE_CASES[case.name] = case + + +def to_snake_case(name): + name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower() + + +def _make_export_case(m, name, configs): + if inspect.isclass(m): + if not issubclass(m, torch.nn.Module): + raise TypeError("Export case class should be a torch.nn.Module.") + m = m() + + if "description" not in configs: + # Fallback to docstring if description is missing. + assert ( + m.__doc__ is not None + ), f"Could not find description or docstring for export case: {m}" + configs = {**configs, "description": m.__doc__} + return ExportCase(**{**configs, "model": m, "name": name}) + + +def export_case(**kwargs): + """ + Decorator for registering a user provided case into example bank. + """ + + def wrapper(m): + configs = kwargs + module = inspect.getmodule(m) + if module in _MODULES: + raise RuntimeError("export_case should only be used once per example file.") + + _MODULES.add(module) + normalized_name = to_snake_case(m.__name__) + assert module is not None + module_name = module.__name__.split(".")[-1] + if module_name != normalized_name: + raise RuntimeError( + f'Module name "{module.__name__}" is inconsistent with exported program ' + + f'name "{m.__name__}". Please rename the module to "{normalized_name}".' 
+            )
+
+        case = _make_export_case(m, module_name, configs)
+        register_db_case(case)
+        return case
+
+    return wrapper
+
+
+def export_rewrite_case(**kwargs):
+    def wrapper(m):
+        configs = kwargs
+
+        parent = configs.pop("parent")
+        assert isinstance(parent, ExportCase)
+        key = parent.name
+        if key not in _EXAMPLE_REWRITE_CASES:
+            _EXAMPLE_REWRITE_CASES[key] = []
+
+        configs["example_inputs"] = parent.example_inputs
+        case = _make_export_case(m, to_snake_case(m.__name__), configs)
+        _EXAMPLE_REWRITE_CASES[key].append(case)
+        return case
+
+    return wrapper
+
+
+def normalize_inputs(x: InputsType) -> ExportArgs:
+    if isinstance(x, tuple):
+        return ExportArgs(*x)
+
+    assert isinstance(x, ExportArgs)
+    return x
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d737548c3d480d11e722ad5ae076cebe9f2523c4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py
@@ -0,0 +1,52 @@
+import glob
+import importlib
+from os.path import basename, dirname, isfile, join
+
+import torch
+from torch._export.db.case import (
+    _EXAMPLE_CASES,
+    _EXAMPLE_CONFLICT_CASES,
+    _EXAMPLE_REWRITE_CASES,
+    SupportLevel,
+)
+
+
+modules = glob.glob(join(dirname(__file__), "*.py"))
+__all__ = [
+    basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py")
+]
+
+# Import all modules in the current directory.
+from . import *  # noqa: F403
+
+
+def all_examples():
+    return _EXAMPLE_CASES
+
+
+if len(_EXAMPLE_CONFLICT_CASES) > 0:
+
+    def get_name(case):
+        model = case.model
+        if isinstance(model, torch.nn.Module):
+            model = type(model)
+        return model.__name__
+
+    msg = "Error on conflict export case name.\n"
+    for case_name, cases in _EXAMPLE_CONFLICT_CASES.items():
+        msg += f"Case name {case_name} is associated with multiple cases:\n  "
+        msg += f"[{','.join(map(get_name, cases))}]\n"
+
+    raise RuntimeError(msg)
+
+
+def filter_examples_by_support_level(support_level: SupportLevel):
+    return {
+        key: val
+        for key, val in all_examples().items()
+        if val.support_level == support_level
+    }
+
+
+def get_rewrite_cases(case):
+    return _EXAMPLE_REWRITE_CASES.get(case.name, [])
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..664aab8b64da2b239daaa2d78c068a1d7397c4a4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py
@@ -0,0 +1,24 @@
+import torch
+import torch._dynamo as torchdynamo
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2), torch.tensor(4)),
+    tags={"torch.escape-hatch"},
+)
+class AssumeConstantResult(torch.nn.Module):
+    """
+    Applying the `assume_constant_result` decorator to mark non-traceable code as constant.
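+
+    Here `get_item` returns a plain Python int extracted from `y`; the decorator
+    tells the tracer to treat that value as a constant instead of tracing
+    through `.item()`.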
+ """ + + def __init__(self): + super().__init__() + + @torchdynamo.assume_constant_result + def get_item(self, y): + return y.int().item() + + def forward(self, x, y): + return x[: self.get_item(y)] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py new file mode 100644 index 0000000000000000000000000000000000000000..9c8aeadc45ae291f363bb4850b30bab4fb14214d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py @@ -0,0 +1,26 @@ +import torch + +from torch._export.db.case import export_case + + +class MyAutogradFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x.clone() + + @staticmethod + def backward(ctx, grad_output): + return grad_output + 1 + + +@export_case( + example_inputs=(torch.randn(3, 2),), +) +class AutogradFunction(torch.nn.Module): + """ + TorchDynamo does not keep track of backward() on autograd functions. We recommend to + use `allow_in_graph` to mitigate this problem. + """ + + def forward(self, x): + return MyAutogradFunction.apply(x) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py new file mode 100644 index 0000000000000000000000000000000000000000..77c629559d21eb6390c00ce8143d773d16f5710f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py @@ -0,0 +1,24 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 4),), +) +class ClassMethod(torch.nn.Module): + """ + Class methods are inlined during tracing. + """ + + @classmethod + def method(cls, x): + return x + 1 + + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(4, 2) + + def forward(self, x): + x = self.linear(x) + return self.method(x) * self.__class__.method(x) * type(self).method(x) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py new file mode 100644 index 0000000000000000000000000000000000000000..68dd3772684d1c8ea784a5d74214895dedeeb530 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py @@ -0,0 +1,46 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +class MySubModule(torch.nn.Module): + def foo(self, x): + return x.cos() + + def forward(self, x): + return self.foo(x) + + +@export_case( + example_inputs=(torch.ones(3),), + tags={ + "torch.cond", + "torch.dynamic-shape", + }, +) +class CondBranchClassMethod(torch.nn.Module): + """ + The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules: + - both branches must take the same args, which must also match the branch args passed to cond. + - both branches must return a single tensor + - returned tensor must have the same tensor metadata, e.g. shape and dtype + - branch function can be free function, nested function, lambda, class methods + - branch function can not have closure variables + - no inplace mutations on inputs or global variables + + + This example demonstrates using class method in cond(). 
+
+    NOTE: If the `pred` is tested on a dim with batch size < 2, it will be specialized.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.subm = MySubModule()
+
+    def bar(self, x):
+        return x.sin()
+
+    def forward(self, x):
+        return cond(x.shape[0] <= 2, self.subm.forward, self.bar, [x])
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..c403c83de6a176ee5d33f8be24136e384c523116
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py
@@ -0,0 +1,41 @@
+import torch
+
+from torch._export.db.case import export_case
+from functorch.experimental.control_flow import cond
+
+
+@export_case(
+    example_inputs=(torch.ones(3),),
+    tags={
+        "torch.cond",
+        "torch.dynamic-shape",
+    },
+)
+def cond_branch_nested_function(x):
+    """
+    The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules:
+    - both branches must take the same args, which must also match the branch args passed to cond.
+    - both branches must return a single tensor
+    - returned tensor must have the same tensor metadata, e.g. shape and dtype
+    - branch function can be free function, nested function, lambda, class methods
+    - branch function can not have closure variables
+    - no inplace mutations on inputs or global variables
+
+    This example demonstrates using a nested function in cond().
+
+    NOTE: If the `pred` is tested on a dim with batch size < 2, it will be specialized.
+    """
+
+    def true_fn(x):
+        def inner_true_fn(y):
+            return x + y
+
+        return inner_true_fn(x)
+
+    def false_fn(x):
+        def inner_false_fn(y):
+            return x - y
+
+        return inner_false_fn(x)
+
+    return cond(x.shape[0] < 10, true_fn, false_fn, [x])
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py
new file mode 100644
index 0000000000000000000000000000000000000000..bad8f121792e1c39ec1f342b2a07bbe26f3f70b3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py
@@ -0,0 +1,59 @@
+import torch
+
+from torch._export.db.case import export_case
+from functorch.experimental.control_flow import cond
+
+
+@export_case(
+    example_inputs=(torch.ones(6),),
+    tags={
+        "torch.cond",
+        "torch.dynamic-shape",
+    },
+)
+def cond_branch_nonlocal_variables(x):
+    """
+    The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules:
+    - both branches must take the same args, which must also match the branch args passed to cond.
+    - both branches must return a single tensor
+    - returned tensor must have the same tensor metadata, e.g. shape and dtype
+    - branch function can be free function, nested function, lambda, class methods
+    - branch function can not have closure variables
+    - no inplace mutations on inputs or global variables
+
+    This example demonstrates how to rewrite code to avoid capturing closure variables in branch functions.
+
+    The code below will not work because capturing closure variables is not supported.
+    ```
+    my_tensor_var = x + 100
+    my_primitive_var = 3.14
+
+    def true_fn(y):
+        nonlocal my_tensor_var, my_primitive_var
+        return y + my_tensor_var + my_primitive_var
+
+    def false_fn(y):
+        nonlocal my_tensor_var, my_primitive_var
+        return y - my_tensor_var - my_primitive_var
+
+    return cond(x.shape[0] > 5, true_fn, false_fn, [x])
+    ```
+
+    NOTE: If the `pred` is tested on a dim with batch size < 2, it will be specialized.
+    """
+
+    my_tensor_var = x + 100
+    my_primitive_var = 3.14
+
+    def true_fn(x, y, z):
+        return x + y + z
+
+    def false_fn(x, y, z):
+        return x - y - z
+
+    return cond(
+        x.shape[0] > 5,
+        true_fn,
+        false_fn,
+        [x, my_tensor_var, torch.tensor(my_primitive_var)],
+    )
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py
new file mode 100644
index 0000000000000000000000000000000000000000..584036329244a91bbb801556e1ee1a0dd4d6ee53
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py
@@ -0,0 +1,35 @@
+import torch
+
+from torch._export.db.case import export_case
+from torch.export import Dim
+from functorch.experimental.control_flow import cond
+
+x = torch.randn(3, 2)
+y = torch.ones(2)
+dim0_x = Dim("dim0_x")
+
+@export_case(
+    example_inputs=(x, y),
+    tags={
+        "torch.cond",
+        "torch.dynamic-shape",
+    },
+    extra_inputs=(torch.randn(2, 2), torch.ones(2)),
+    dynamic_shapes={"x": {0: dim0_x}, "y": None},
+)
+def cond_operands(x, y):
+    """
+    The operands passed to cond() must be:
+    - a list of tensors
+    - matching the arguments of `true_fn` and `false_fn`
+
+    NOTE: If the `pred` is tested on a dim with batch size < 2, it will be specialized.
+    """
+
+    def true_fn(x, y):
+        return x + y
+
+    def false_fn(x, y):
+        return x - y
+
+    return cond(x.shape[0] > 2, true_fn, false_fn, [x, y])
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cded103aeeb8ed7f2e1e5907d216b37be4414b0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py
@@ -0,0 +1,25 @@
+import torch
+
+from torch._export.db.case import export_case
+from functorch.experimental.control_flow import cond
+
+
+@export_case(
+    example_inputs=(torch.ones(6, 4, 3),),
+    tags={
+        "torch.cond",
+        "torch.dynamic-shape",
+    },
+)
+def cond_predicate(x):
+    """
+    The conditional statement (aka predicate) passed to cond() must be one of the following:
+    - torch.Tensor with a single element
+    - boolean expression
+
+    NOTE: If the `pred` is tested on a dim with batch size < 2, it will be specialized.
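+
+    For example, both of the following are valid predicates (illustrative):
+
+    ```
+    cond(x.sum() > 0, true_fn, false_fn, [x])   # single-element tensor
+    cond(x.dim() > 2, true_fn, false_fn, [x])   # python boolean expression
+    ```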
+    """
+
+    pred = x.dim() > 2 and x.shape[2] > 10
+
+    return cond(pred, lambda x: x.cos(), lambda y: y.sin(), [x])
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..dade607bdc757a3d29892ec9e4e0471d8a29b509
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py
@@ -0,0 +1,22 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.tensor(4),),
+    tags={
+        "torch.dynamic-value",
+        "torch.escape-hatch",
+    },
+)
+def constrain_as_size_example(x):
+    """
+    If the value is not known at tracing time, you can provide a hint so that
+    we can trace further. Please look at the constrain_as_value and
+    constrain_as_size APIs. constrain_as_size is used for values that NEED to
+    be used for constructing a tensor.
+    """
+    a = x.item()
+    torch._constrain_as_size(a, min=0, max=5)
+    return torch.ones((a, 5))
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py
new file mode 100644
index 0000000000000000000000000000000000000000..39eff84af34812e1a31006c698652ec6dc2bbd20
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py
@@ -0,0 +1,26 @@
+import functools
+
+import torch
+
+from torch._export.db.case import export_case
+
+
+def test_decorator(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        return func(*args, **kwargs) + 1
+
+    return wrapper
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2), torch.ones(3, 2)),
+)
+class Decorator(torch.nn.Module):
+    """
+    Decorator calls are inlined into the exported function during tracing.
+    """
+
+    @test_decorator
+    def forward(self, x, y):
+        return x + y
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py
new file mode 100644
index 0000000000000000000000000000000000000000..9db3bfccb5cfbb3c41cccb8bc592c5738e5f2631
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py
@@ -0,0 +1,17 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2), torch.tensor(4)),
+    tags={"python.data-structure"},
+)
+def dictionary(x, y):
+    """
+    Dictionary structures are inlined and flattened during tracing.
+    """
+    elements = {}
+    elements["x2"] = x * x
+    y = y * elements["x2"]
+    return {"y": y}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py
new file mode 100644
index 0000000000000000000000000000000000000000..795c0f3803f2e4e7bb147222499629f7bf706ece
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py
@@ -0,0 +1,22 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"python.assert"},
+)
+def dynamic_shape_assert(x):
+    """
+    A basic usage of Python assertions.
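+
+    For example, with the (3, 2) example input above, both asserts below hold
+    at trace time, so tracing proceeds; a failing shape assert would instead
+    raise during tracing (an illustrative description of the behavior).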
+    """
+    # assertion with error message
+    assert x.shape[0] > 2, f"{x.shape[0]} is not greater than 2"
+    # assertion without error message
+    assert x.shape[0] > 1
+    return x
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a4d1e001de4b9121ef251f63748799dd5664134
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py
@@ -0,0 +1,15 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"torch.dynamic-shape"},
+)
+def dynamic_shape_constructor(x):
+    """
+    Tensor constructors should be captured with dynamic shape inputs rather
+    than being baked in with static shape.
+    """
+    return torch.ones(x.shape[0] * 2)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py
new file mode 100644
index 0000000000000000000000000000000000000000..45c8d36bee1fa7ed0102809a6871fbfa76628696
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py
@@ -0,0 +1,21 @@
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2, 2),),
+    tags={"torch.dynamic-shape", "python.control-flow"},
+)
+class DynamicShapeIfGuard(torch.nn.Module):
+    """
+    An `if` statement with a backed dynamic shape predicate will be specialized
+    into one particular branch and generate a guard. However, export will fail
+    if the dimension is marked as dynamic shape from a higher level API.
+    """
+
+    def forward(self, x):
+        if x.shape[0] == 3:
+            return x.cos()
+
+        return x.sin()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py
new file mode 100644
index 0000000000000000000000000000000000000000..62709520fad38785d7f2cc0e818d71ea524c95da
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py
@@ -0,0 +1,23 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel
+from torch.export import Dim
+
+x = torch.ones(3, 2)
+dim0_x = Dim("dim0_x")
+
+@export_case(
+    example_inputs=(x,),
+    tags={"torch.dynamic-shape", "python.builtin"},
+    support_level=SupportLevel.NOT_SUPPORTED_YET,
+    dynamic_shapes={"x": {0: dim0_x}},
+)
+def dynamic_shape_round(x):
+    """
+    Calling round() on dynamic shapes is not supported.
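+
+    For example, with dim 0 marked dynamic via `dim0_x` above, the slice bound
+    `round(x.shape[0] / 2)` applies a Python builtin to a symbolic value,
+    which tracing cannot handle (hence SupportLevel.NOT_SUPPORTED_YET).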
+ """ + return x[: round(x.shape[0] / 2)] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py new file mode 100644 index 0000000000000000000000000000000000000000..b95334509a54a02cf678bebd93436131b69e8c67 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py @@ -0,0 +1,15 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape"}, +) +def dynamic_shape_slicing(x): + """ + Slices with dynamic shape arguments should be captured into the graph + rather than being baked in. + """ + return x[: x.shape[0] - 2, x.shape[1] - 1 :: 2] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py new file mode 100644 index 0000000000000000000000000000000000000000..c6c577b6cfae2d58d39245280e9b2054314e9aae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py @@ -0,0 +1,17 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(10, 10),), + tags={"torch.dynamic-shape"}, +) +def dynamic_shape_view(x): + """ + Dynamic shapes should be propagated to view arguments instead of being + baked into the exported graph. + """ + new_x_shape = x.size()[:-1] + (2, 5) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py new file mode 100644 index 0000000000000000000000000000000000000000..6b6dbd935a6cb0ab00dad49bd121d3f79f855957 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py @@ -0,0 +1,28 @@ +import torch + +from torch._export.db.case import export_case, ExportArgs, SupportLevel + + +@export_case( + example_inputs=ExportArgs( + torch.randn(4), + (torch.randn(4), torch.randn(4)), + *[torch.randn(4), torch.randn(4)], + mykw0=torch.randn(4), + input0=torch.randn(4), input1=torch.randn(4) + ), + tags={"python.data-structure"}, + support_level=SupportLevel.SUPPORTED, +) +def fn_with_kwargs(pos0, tuple0, *myargs, mykw0, **mykwargs): + """ + Keyword arguments are not supported at the moment. + """ + out = pos0 + for arg in tuple0: + out = out * arg + for arg in myargs: + out = out * arg + out = out * mykw0 + out = out * mykwargs["input0"] * mykwargs["input1"] + return out diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py new file mode 100644 index 0000000000000000000000000000000000000000..a222049d31bd884deb8107fd464726481cbff1a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py @@ -0,0 +1,17 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape", "python.data-structure", "python.assert"}, +) +def list_contains(x): + """ + List containment relation can be checked on a dynamic shape or constants. 
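+
+    For example, with the (3, 2) example input above, `x.size(-1) in [6, 2]`
+    holds at trace time, while the purely constant check on strings is simply
+    evaluated away (an illustrative reading of the asserts below).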
+    """
+    assert x.size(-1) in [6, 2]
+    assert x.size(0) not in [4, 5, 6]
+    assert "monkey" not in ["cow", "pig"]
+    return x + x
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2d92e08b88b9b12668cd35e9fedbb76ad2f4e00
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py
@@ -0,0 +1,18 @@
+from typing import List
+
+import torch
+
+from torch._export.db.case import export_case
+
+
+@export_case(
+    example_inputs=([torch.ones(3, 2), torch.tensor(4), torch.tensor(5)],),
+    tags={"python.control-flow", "python.data-structure"},
+)
+def list_unpack(args: List[torch.Tensor]):
+    """
+    Lists are treated as a static construct; therefore, unpacking should be
+    erased after tracing.
+    """
+    x, *y = args
+    return x + y[0]
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4d76cc67eda8cbb3306f27b2315ae35c7517aa2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py
@@ -0,0 +1,25 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"python.object-model"},
+    support_level=SupportLevel.NOT_SUPPORTED_YET,
+)
+class ModelAttrMutation(torch.nn.Module):
+    """
+    Attribute mutation is not supported.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.attr_list = [torch.ones(3, 2), torch.ones(3, 2)]
+
+    def recreate_list(self):
+        return [torch.zeros(3, 2), torch.zeros(3, 2)]
+
+    def forward(self, x):
+        self.attr_list = self.recreate_list()
+        return x.sum() + self.attr_list[0].sum()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a06207b6eaf8f24d673c7ec227c3a5643c2d6a3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py
@@ -0,0 +1,19 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel
+
+
+@export_case(
+    example_inputs=(torch.randn(2, 3),),
+    tags={"python.object-model"},
+    support_level=SupportLevel.NOT_SUPPORTED_YET,
+)
+class OptionalInput(torch.nn.Module):
+    """
+    Tracing through an optional input is not supported yet.
+    """
+
+    def forward(self, x, y=torch.ones(2, 3)):
+        if y is not None:
+            return x + y
+        return x
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..d87734208320959ed1b81ae6e787c86ee574661a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py
@@ -0,0 +1,16 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel
+from torch.utils import _pytree as pytree
+
+
+@export_case(
+    example_inputs=({1: torch.randn(3, 2), 2: torch.randn(3, 2)},),
+    support_level=SupportLevel.SUPPORTED,
+)
+def pytree_flatten(x):
+    """
+    Pytrees from PyTorch cannot be
captured by TorchDynamo. + """ + y, spec = pytree.tree_flatten(x) + return y[0] + 1 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py new file mode 100644 index 0000000000000000000000000000000000000000..dd6a7aa584282e367ce61d42c049b6b9703d435e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py @@ -0,0 +1,19 @@ +import torch + +from torch._export.db.case import export_case +from torch.export import Dim + +x = torch.ones(3, 2) +dim1_x = Dim("dim1_x") + +@export_case( + example_inputs=(x,), + tags={"torch.dynamic-shape"}, + dynamic_shapes={"x": {1: dim1_x}}, +) +def scalar_output(x): + """ + Returning scalar values from the graph is supported, in addition to Tensor + outputs. Symbolic shapes are captured and rank is specialized. + """ + return x.shape[1] + 1 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py new file mode 100644 index 0000000000000000000000000000000000000000..743a357fc13ca984369cdddadf31bb4ee27e9109 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py @@ -0,0 +1,29 @@ +from enum import Enum + +import torch + +from torch._export.db.case import export_case + + +class Animal(Enum): + COW = "moo" + + +@export_case( + example_inputs=(torch.ones(3, 2),), +) +class SpecializedAttribute(torch.nn.Module): + """ + Model attributes are specialized. + """ + + def __init__(self): + super().__init__() + self.a = "moo" + self.b = 4 + + def forward(self, x): + if self.a == Animal.COW.value: + return x * x + self.b + else: + raise ValueError("bad") diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py new file mode 100644 index 0000000000000000000000000000000000000000..c258e430f7ea0fa4a5b58ef9d6988e936fbb0f3f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2, 2),), + tags={"python.control-flow"}, +) +class StaticIf(torch.nn.Module): + """ + `if` statement with static predicate value should be traced through with the + taken branch. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + if len(x.shape) == 3: + return x + torch.ones(1, 1, 1) + + return x diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py new file mode 100644 index 0000000000000000000000000000000000000000..3aaedb9a9d52b09f2a67345f5ee8ff62c54e2545 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py @@ -0,0 +1,16 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.randn(3, 2), "attr"), + tags={"python.builtin"}, + support_level=SupportLevel.SUPPORTED, +) +def tensor_setattr(x, attr): + """ + setattr() call onto tensors is not supported. 
+    """
+    setattr(x, attr, torch.randn(3, 2))
+    return x + 4
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9f4dd8f8496ccfd6c81b7007a96d9a05e6ffce5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py
@@ -0,0 +1,17 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 2),),
+    tags={"torch.operator"},
+    support_level=SupportLevel.NOT_SUPPORTED_YET,
+)
+class TorchSymMin(torch.nn.Module):
+    """
+    The torch.sym_min operator is not supported in export.
+    """
+
+    def forward(self, x):
+        return x.sum() + torch.sym_min(x.size(0), 100)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0b304649e7084717528679a71ab99539d90ebea
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py
@@ -0,0 +1,34 @@
+import torch
+
+from torch._export.db.case import export_case, SupportLevel, export_rewrite_case
+
+
+class A:
+    @classmethod
+    def func(cls, x):
+        return 1 + x
+
+
+@export_case(
+    example_inputs=(torch.ones(3, 4),),
+    tags={"python.builtin"},
+    support_level=SupportLevel.SUPPORTED,
+)
+def type_reflection_method(x):
+    """
+    type() calls on custom objects followed by method calls are not allowed
+    due to their overly dynamic nature.
+    """
+    a = A()
+    return type(a).func(x)
+
+
+@export_rewrite_case(parent=type_reflection_method)
+def type_reflection_method_rewrite(x):
+    """
+    Custom object class methods will be inlined.
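+
+    For example, calling `A.func(x)` directly (as done below) avoids the
+    `type()` reflection on the instance while computing the same result.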
+ """ + return A.func(x) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..2bb16cd64a56fce4c4ccfdbb257f32f11514439c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py @@ -0,0 +1,18 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.mutation"}, + support_level=SupportLevel.SUPPORTED, +) +class UserInputMutation(torch.nn.Module): + """ + Directly mutate user input in forward + """ + + def forward(self, x): + x.mul_(2) + return x.cos() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/gen_example.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/gen_example.py new file mode 100644 index 0000000000000000000000000000000000000000..301cf42beb062dd5ad9763507417de57fcc6e48d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/gen_example.py @@ -0,0 +1,28 @@ +import os +import sys + +import torch._export.db.examples as examples + +TEMPLATE = '''import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.randn(3, 2),), + tags={{}}, +) +def {case_name}(x): + """ + """ + + return +''' + +if __name__ == "__main__": + assert len(sys.argv) == 2 + root_dir = examples.__name__.replace(".", "/") + assert os.path.exists(root_dir) + with open(os.path.join(root_dir, sys.argv[1] + ".py"), "w") as f: + print("Writing to", f.name, "...") + f.write(TEMPLATE.format(case_name=sys.argv[1])) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/db/logging.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..fc412b8c5082dd8c4346711314fc7cc43c1a9ba2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/db/logging.py @@ -0,0 +1,2 @@ +def exportdb_error_message(case_name: str): + return "" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/exported_program.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/exported_program.py new file mode 100644 index 0000000000000000000000000000000000000000..74a669927134e14934a60ac247bca3ae68d1efe0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/exported_program.py @@ -0,0 +1,430 @@ +import copy +from collections import defaultdict +import dataclasses +from typing import Dict, List, Optional, Tuple +import warnings + +import sympy + +import torch +import torch.fx + +import torch.utils._pytree as pytree +from torch._subclasses.fake_tensor import FakeTensor +from torch.fx.experimental.symbolic_shapes import SymInt +from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo +from torch.utils._sympy.value_ranges import ValueRanges + +from torch._export.passes.add_runtime_assertions_for_constraints_pass import ( + InputDim, +) + + +# TODO(ycao): This is added to avoid breaking existing code temporarily. +# Remove when migration is done. 
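+# The imports below are re-exported (see __all__) so that existing call sites
+# importing these names from torch._export.exported_program keep working.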
+from torch.export.graph_signature import ( + ExportBackwardSignature, + ExportGraphSignature, +) + +from torch.export.exported_program import ( + ExportedProgram, + ModuleCallEntry, + ModuleCallSignature, +) + +from .utils import _check_input_constraints_pre_hook + + +__all__ = [ + "ExportBackwardSignature", + "ExportGraphSignature", + "ExportedProgram", + "ModuleCallEntry", + "ModuleCallSignature", +] + + +# Information to maintain user calling/returning specs +@dataclasses.dataclass +class CallSpec: + in_spec: Optional[pytree.TreeSpec] + out_spec: Optional[pytree.TreeSpec] + + +def _unlift(gm, inp_pos_to_param_buffer_name, in_spec, out_spec, state_dict, tensor_constants, buffers_to_mutate): + count = 0 + buffer_name_to_node = {} + # Step 1: make lifted params as get_attr + for node in gm.graph.nodes: + if node.op == "placeholder": + if count in inp_pos_to_param_buffer_name: + with gm.graph.inserting_after(node): + getattr_node = gm.graph.get_attr( + inp_pos_to_param_buffer_name[count] + ) + node.replace_all_uses_with(getattr_node) + metadata = node.meta + gm.graph.erase_node(node) + getattr_node.meta = metadata + buffer_name_to_node[inp_pos_to_param_buffer_name[count]] = getattr_node + + count += 1 + # Step 2: Find the all the buffers that were mutated and update them + if node.op == "output": + user_output_nodes = [] + # In the case that the same node is returned multiple times, + # node.all_input_nodes will only iterate that node once + for return_node in pytree.tree_flatten(node.args)[0]: + return_node_name = return_node.name + # we found a param/buffer mutation + if return_node_name in buffers_to_mutate: + # TODO Fix situation here to replace dot with underscore... + buffer_node_name = buffers_to_mutate[return_node_name].replace('.', '_') + assert buffer_node_name in buffer_name_to_node + buffer_node = buffer_name_to_node[buffer_node_name] + with gm.graph.inserting_before(node): + gm.graph.call_function( + torch.ops.aten.copy_.default, (buffer_node, return_node) + ) + else: + user_output_nodes.append(return_node) + with gm.graph.inserting_before(node): + # Only return user outputs + new_output = gm.graph.output(tuple(user_output_nodes)) + node.replace_all_uses_with(new_output) + gm.graph.erase_node(node) + + # Step 3: Fix the input/output of the graph now that we deleted + # some args. + gm.graph.lint() + + if ( + in_spec.type == tuple and + len(in_spec.children_specs) == 2 and + in_spec.children_specs[0].type == tuple and + in_spec.children_specs[1].type == dict + ): + # if in_spec contains the args (tuple) and kwargs (dict) + + num_args = ( + len(in_spec.children_specs[0].children_specs) + + len(in_spec.children_specs[1].children_specs) + ) + else: + num_args = len(in_spec.children_specs) + + names = [f"arg_{i}" for i in range(num_args)] + + gm.graph._codegen = _PyTreeCodeGen( + _PyTreeInfo( + names, + in_spec, + out_spec, + ) + ) + gm.recompile() + + # Step 4: Find state references in HigherOrderOps and recursively + # fix them. 
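+    # For each cond/map higher-order op below, operands that refer to lifted
+    # params/buffers are re-registered as buffers on the branch/body submodules
+    # and dropped from the operand list, mirroring what Step 1 did at the top
+    # level (a summary of the logic that follows).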
+ for node in gm.graph.nodes: + if node.op == "call_function" and node.target == torch.ops.cond: + pred, true_graph, false_graph, operands = node.args + true_gm = getattr(gm, true_graph.name) + false_gm = getattr(gm, false_graph.name) + inp_pos_to_param_buffer_name_for_submod = {} + real_operands = [] + for ix, operand in enumerate(operands): + if operand.target in inp_pos_to_param_buffer_name.values(): + inp_pos_to_param_buffer_name_for_submod[ix] = operand.target + if operand.target in state_dict: + value = state_dict[operand.target] + elif operand.target in tensor_constants: + value = tensor_constants[operand.target] + else: + raise RuntimeError("Unable to find value for ", operand.target) + true_gm.register_buffer(operand.target, value) + false_gm.register_buffer(operand.target, value) + else: + real_operands.append(operand) + node.args = (pred, true_graph, false_graph, real_operands) + + _, in_spec = pytree.tree_flatten(real_operands) + + _unlift( + true_gm, + inp_pos_to_param_buffer_name_for_submod, + in_spec, + None, + state_dict, + tensor_constants, + buffers_to_mutate, + ) + _unlift( + false_gm, + inp_pos_to_param_buffer_name_for_submod, + in_spec, + None, + state_dict, + tensor_constants, + buffers_to_mutate, + ) + if node.op == "call_function" and node.target.__name__ == "map_impl": + body_graph, num_mapped, *operands = node.args + body_gm = getattr(gm, body_graph.name) + inp_pos_to_buffer_name_for_submod = {} + real_operands = [] + # TODO Fix situation here to replace dot with underscore... + state_dict_for_lookup = { + key.replace(".", "_"): value + for key, value in state_dict.items() + } + for ix, operand in enumerate(operands): + if operand.target in inp_pos_to_param_buffer_name.values(): + inp_pos_to_buffer_name_for_submod[ix] = operand.target + if operand.target in state_dict_for_lookup: + value = state_dict_for_lookup[operand.target] + elif operand.target in tensor_constants: + value = tensor_constants[operand.target] + else: + raise RuntimeError(f"Unable to find value for {operand.target}") + body_gm.register_buffer(operand.target, value) + else: + real_operands.append(operand) + node.args = (body_graph, num_mapped, *real_operands) + + _, in_spec = pytree.tree_flatten(real_operands) + + _unlift( + body_gm, + inp_pos_to_buffer_name_for_submod, + in_spec, + None, + state_dict, + tensor_constants, + buffers_to_mutate, + ) + gm.graph.lint() + gm.graph.eliminate_dead_code() + gm.recompile() + return gm + +def _construct_inp_pos_to_param_buffer_name(new_gm, graph_signature, state_dict, tensor_constants=None): + # TODO Fix the period in params/buffers names later + # maybe a pass to replace graph signature with fixed names + param_buffer_name_to_corrected_name = {} + + for name, value in state_dict.items(): + if name in graph_signature.buffers: + if "." in name: + new_gm.register_buffer(name.replace(".", "_"), value) + param_buffer_name_to_corrected_name[name] = name.replace(".", "_") + else: + new_gm.register_buffer(name, value) + if name in graph_signature.parameters: + if "." 
in name: + new_gm.register_parameter(name.replace(".", "_"), value) + param_buffer_name_to_corrected_name[name] = name.replace(".", "_") + else: + new_gm.register_parameter(name, value) + + if tensor_constants is not None and len(tensor_constants) > 0: + assert hasattr(graph_signature, "lifted_tensor_constants") + for name, value in tensor_constants.items(): + if name in graph_signature.lifted_tensor_constants: + new_gm.register_buffer(name, value) + param_buffer_name_to_corrected_name[name] = name + + count = 0 + inp_pos_to_param_buffer_name = {} + for node in new_gm.graph.nodes: + if node.op == "placeholder": + if node.name in graph_signature.inputs_to_buffers: + buffer_name = graph_signature.inputs_to_buffers[node.name] + if buffer_name in param_buffer_name_to_corrected_name: + inp_pos_to_param_buffer_name[ + count + ] = param_buffer_name_to_corrected_name[buffer_name] + else: + inp_pos_to_param_buffer_name[count] = buffer_name + if node.name in graph_signature.inputs_to_parameters: + param_name = graph_signature.inputs_to_parameters[node.name] + if param_name in param_buffer_name_to_corrected_name: + inp_pos_to_param_buffer_name[ + count + ] = param_buffer_name_to_corrected_name[param_name] + else: + inp_pos_to_param_buffer_name[count] = param_name + if hasattr(graph_signature, "inputs_to_lifted_tensor_constants"): + if node.name in graph_signature.inputs_to_lifted_tensor_constants: + inp_pos_to_param_buffer_name[ + count + ] = graph_signature.inputs_to_lifted_tensor_constants[node.name] + count += 1 + + return inp_pos_to_param_buffer_name + + +class _StatefulGraphModuleFactory(type): + """ + Metaclass that ensures a private constructor for _StatefulGraphModule + """ + + def __call__(cls, *args, **kwargs): + raise TypeError( + f"{cls.__module__}.{cls.__qualname__} has no public constructor. 
" + ) + + def _create(cls, root, graph, range_constraints=None, equality_constraints=None): + return super().__call__( + root, + graph, + range_constraints=range_constraints, + equality_constraints=equality_constraints + ) + + +class _StatefulGraphModule(torch.fx.GraphModule, metaclass=_StatefulGraphModuleFactory): + def __init__(self, root, graph, range_constraints=None, equality_constraints=None): + super().__init__(root, graph) + self.range_constraints = range_constraints or [] + self.equality_constraints = equality_constraints or [] + + +def _create_stateful_graph_module(plain_graph_module: torch.fx.GraphModule, range_constraints, equality_constraints): + stateful_gm = _StatefulGraphModule._create( + plain_graph_module, + plain_graph_module.graph, + range_constraints=range_constraints, + equality_constraints=equality_constraints + ) + stateful_gm.register_forward_pre_hook(_check_input_constraints_pre_hook, with_kwargs=True) + return stateful_gm + + +def unlift_exported_program_lifted_states(ep: torch.export.ExportedProgram) -> torch.nn.Module: + new_gm = copy.deepcopy(ep.graph_module) + inp_pos_to_param_buffer_name = _construct_inp_pos_to_param_buffer_name( + new_gm, ep.graph_signature, ep.state_dict, ep.tensor_constants + ) + new_gm = _unlift( + new_gm, + inp_pos_to_param_buffer_name, + ep.call_spec.in_spec, + ep.call_spec.out_spec, + ep.state_dict, + ep.tensor_constants, + ep.graph_signature.buffers_to_mutate, + ) + unlift_gm = _create_stateful_graph_module(new_gm, ep.range_constraints, ep.equality_constraints) + unlift_gm.meta.update(ep.graph_module.meta) + return unlift_gm + + +def _create_graph_module_for_export(root, graph): + try: + gm = torch.fx.GraphModule(root, graph) + except SyntaxError: + # If custom objects stored in memory are being used in the graph, + # the generated python code will result in a syntax error on the custom + # object, since it is unable to parse the in-memory object. However + # we can still run the graph eagerly through torch.fx.Interpreter, + # so we will bypass this error. + warnings.warn( + "Unable to execute the generated python source code from " + "the graph. The graph module will no longer be directly callable, " + "but you can still run the ExportedProgram, and if needed, you can " + "run the graph module eagerly using torch.fx.Interpreter." + ) + gm = torch.fx.GraphModule(root, torch.fx.Graph()) + gm._graph = graph + + return gm + + +def _process_constraints( + graph_module: torch.fx.GraphModule, + num_lifted_params_buffers: int, + example_inputs: List[torch.Tensor], +) -> Tuple[Dict[sympy.Symbol, ValueRanges], List[Tuple[InputDim, InputDim]]]: + """ + Process the constraints stored in the graph module to return something more readable. + + Args: + graph_module (torch.fx.GraphModule): GraphModule returned from + dynamo.export, which contains the "input_shape_constraints" and + "inline_constraints" metadata + + example_inputs: Flattened list of example inputs used to export the graph module + + Returns: + range_constraints (Dict[sympy.Symbol, ValueRanges]): Mapping of + symbols (from SymInts) appearing in the fake tensors in + node.meta["val"] to their range constraints, which are a tuple + containing (lower, upper) constraints. + + equality_constraints (List[Tuple[InputDim, InputDim]]): List of tuples + of (node, dim) to mark that these dimensions are equal. 
+    """
+    input_shape_constraints = graph_module.meta.get("input_shape_constraints", [])
+    inline_constraints = graph_module.meta.get("inline_constraints", [])
+
+    # Create dict mapping tensor_id to node names
+    tensor_id_to_nodes: Dict[int, List[str]] = defaultdict(list)
+    # Create dict mapping placeholder node names to their nodes
+    placeholder_nodes: Dict[str, torch.fx.Node] = {}
+    for i, node in enumerate(graph_module.graph.nodes):
+        if node.op != "placeholder":
+            # All placeholder nodes should be together at the beginning of the
+            # graph
+            break
+        if i >= num_lifted_params_buffers:
+            example_input = example_inputs[i - num_lifted_params_buffers]
+            tensor_id_to_nodes[id(example_input)].append(node.name)
+            placeholder_nodes[node.name] = node
+
+    # Create list of (node name, dim) tuples to mark that they are equal
+    equality_constraints: List[Tuple[InputDim, InputDim]] = []
+    # Create dict mapping (node name, dim) to a list of range (lower, upper)
+    # constraints
+    multi_range_constraints: Dict[InputDim, List[ValueRanges]] = defaultdict(list)
+    for constraint in input_shape_constraints:
+        for node in tensor_id_to_nodes[constraint["t_id"]]:
+            node_dim = InputDim(node, constraint["dim"])
+
+            # Accumulate range constraints
+            multi_range_constraints[node_dim].append(
+                ValueRanges(constraint["min"], constraint["max"])
+            )
+
+            # Accumulate equality constraints
+            if shared := constraint.get("shared", None):
+                for other_node in tensor_id_to_nodes[shared["t_id"]]:
+                    other_node_dim = InputDim(other_node, shared["dim"])
+                    equality_constraints.append((node_dim, other_node_dim))
+
+    # Create dict mapping symbol to a singular range (lower, upper)
+    range_constraints: Dict[sympy.Symbol, ValueRanges] = {}
+
+    # Add inline constraints to range_constraints
+    range_constraints = {symbol: inline_constraints[symbol] for symbol in inline_constraints}
+
+    # Add input range constraints to range_constraints
+    for input_dim, multi_range_constraint in multi_range_constraints.items():  # type: ignore[assignment]
+        # Simplify the range constraints into a single range constraint
+        # Ex.
ranges [2, 10] and [3, 11] would get merged to [3, 10] + min_vals = [rc.lower for rc in multi_range_constraint] + max_vals = [rc.upper for rc in multi_range_constraint] + min_val = max(min_vals) # type: ignore[type-var] + max_val = min(max_vals) # type: ignore[type-var] + assert min_val <= max_val # type: ignore[operator] + + # Add input node range constraints + val = placeholder_nodes[input_dim.input_name].meta["val"] + assert isinstance(val, FakeTensor) + symint = val.shape[input_dim.dim] + assert isinstance(symint, SymInt), f"Expected SymInt but got {symint}: {type(symint)}" + symbol = symint.node._expr + range_constraints[symbol] = ValueRanges(min_val, max_val) + + return range_constraints, equality_constraints diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa9ce2ac03c23600c86ff02e38a2a4bfeefef9e2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__init__.py @@ -0,0 +1 @@ +from .replace_view_ops_with_view_copy_ops_pass import ReplaceViewOpsWithViewCopyOpsPass diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45cf863f2d0d0d1db43c583452a899ba27c6ba56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16a89a7f66e1bdeac3341df6ab4f7f04eada4876 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ee02dae6d3ce37195a8fcbb76df478c3218b93f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e342472cf7850a6f73add78115d16d4d24154a9d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constant_tensor_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constant_tensor_pass.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4ddfe97d20c3a0d1b1d8c828a10bda82500c0970 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constant_tensor_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a0b152e810d29f2b025e41f73ea3702c970f789 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4f6055c06c3eece8fed63f92e79694167235a01 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38fd5a7ce62066a478588dbdad5cb6ed677d0ec6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc747933b9255a13a8d536b15f4368411a8eba6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py @@ -0,0 +1,312 @@ +import copy +import math +import operator +import traceback +from collections import OrderedDict +from functools import partial +from typing import Any, Callable, Dict, List, NamedTuple, Set, Tuple + +import sympy + +import torch +import torch.fx +from torch.fx.experimental.symbolic_shapes import SymInt +from torch._export.pass_base import _ExportPassBase, ProxyValue, PassResult +from torch._subclasses.fake_tensor import FakeTensor +from torch.utils._sympy.value_ranges import ValueRanges + + +__all__ = ["_AddRuntimeAssertionsForConstraintsPass", "InputDim"] + + +class InputDim(NamedTuple): + input_name: str + dim: int + + +def _convert_to_int(val): + # Convert simple sympy Integers into concrete int + if val == sympy.oo: + return math.inf + if val == -sympy.oo: + return -math.inf + if isinstance(val, sympy.Integer): + return int(val) + raise RuntimeError( + "Export constraints cannot be non-integer expressions" + ) + + +def _convert_range_to_int(range: ValueRanges): + assert isinstance(range, ValueRanges) + min_val = _convert_to_int(range.lower) + max_val = _convert_to_int(range.upper) + return min_val, max_val + + +class _AddRuntimeAssertionsForInlineConstraintsPass(_ExportPassBase): + def __init__( + self, + range_constraints: 
Dict[sympy.Symbol, ValueRanges], + equality_constraints: List[Tuple[InputDim, InputDim]], + ): + super().__init__() + self.range_constraints: Dict[sympy.Symbol, ValueRanges] = range_constraints + self.equality_constraints: List[Tuple[InputDim, InputDim]] = equality_constraints + self._asserts_generated_unbacked_symbols: Set[sympy.Symbol] = set() + self.counter = 0 + + def _assert_range_constraint(self, proxy, lower, upper, assert_msg): + if lower > -math.inf: + self._insert_assert_async(operator.ge, proxy, lower, assert_msg) + + if upper < math.inf: + self._insert_assert_async(operator.le, proxy, upper, assert_msg) + + def _insert_assert_async(self, operator, lower, upper, assert_msg): + """ + Inserts assert_async call_function nodes in the graph. This function is + called **during** the interpreter-based pass. + """ + self.counter += 1 + cmp = super().call_operator(operator, (lower, upper), {}, self._create_dummy_node_metadata()) + cmp_tensor = super().call_operator(torch.ops.aten.scalar_tensor.default, (cmp,), {}, self._create_dummy_node_metadata()) + super().call_operator( + torch.ops.aten._assert_async.msg, + (cmp_tensor, assert_msg), + {}, + self._create_dummy_node_metadata(), + ) + + def call_operator(self, op, args, kwargs, meta) -> ProxyValue: + ret = super().call_operator(op, args, kwargs, meta) + if "val" not in meta: + return ret + + val = meta["val"] + + # In general, we may have to deal the case such as: ret[1].shape[0]. + # We need first find out what symbols require assertion, then we need to follow the path + # from ret to the symbol, construct the proxies along the way and construct the messages + # piece-wise at the same time. + # + # We use post-order traversal to collect all the proxies callbacks needed, construct + # the error message callbacks, and at the top-level traversal tree we execute all the callbacks. + # We need the callbacks because, in order to call the function to create a proxy for shape[0], we + # need the proxy for shape, which further requires the proxy for ret[1], etc. + def add_assertions(val): + call_backs: List[Callable] = [] + messages: List[str] = [] + if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)): + symbol = val.node._expr + if isinstance(symbol, sympy.Symbol) and symbol.name.startswith("i"): + if symbol in self._asserts_generated_unbacked_symbols: + return call_backs, messages + # We only care about unbacked symints for these inline + # constraints, which are prefixed with 'i' + constraint = self.range_constraints[symbol] + min_val, max_val = _convert_range_to_int(constraint) + assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]." 
+ call_backs.append( + partial(self._assert_range_constraint, lower=min_val, upper=max_val) + ) + messages.append(assert_msg) + self._asserts_generated_unbacked_symbols.add(symbol) + elif isinstance(val, torch.Tensor): + for i, sym in enumerate(val.shape): + cbs, msgs = add_assertions(sym) + for cb, msg in zip(cbs, msgs): + def sym_size_cb(proxy, assert_msg, dim): + dim_proxy = super( + _AddRuntimeAssertionsForInlineConstraintsPass, + self + ).call_operator( + torch.ops.aten.sym_size.int, + (proxy, dim), + {}, + self._create_dummy_node_metadata(), + ) + cb(proxy=dim_proxy, assert_msg=assert_msg) + call_backs.append(partial(sym_size_cb, dim=i)) + messages.append(f".shape[{i}]" + msg) + return call_backs, messages + callbacks, messages = add_assertions(val) + for cb, msg in zip(callbacks, messages): + cb(proxy=ret, assert_msg=f"{ret.node}" + msg) + return ret + + def call(self, graph_module): + # Add runtime asserts for inline constraints + val = super().call(graph_module) + + # Sometimes this pass would return a wrong graph where we have mismatched + # node names in signature. Before we fix it, let's just skip it. + if self.counter == 0 and type(self) is _AddRuntimeAssertionsForInlineConstraintsPass: + return PassResult(graph_module, False) + + # Populate the stack trace with dummy vals to respect IR + for node in val.graph_module.graph.nodes: + if not node.meta.get("stack_trace", None): + node.meta["stack_trace"] = "".join(traceback.format_stack(limit=1)) + + return PassResult(val.graph_module, val.modified) + + +class _AddRuntimeAssertionsForConstraintsPass(_AddRuntimeAssertionsForInlineConstraintsPass): + def __init__( + self, + range_constraints: Dict[sympy.Symbol, ValueRanges], + equality_constraints: List[Tuple[InputDim, InputDim]], + ): + super().__init__(range_constraints, equality_constraints) + + def call(self, graph_module: torch.fx.GraphModule) -> PassResult: + graph_module = copy.deepcopy(graph_module) + graph = graph_module.graph + + insert_loc = None + for node in graph.nodes: + if node.op != "placeholder": + continue + insert_loc = node + if insert_loc is None: + return super().call(graph_module) + + # Add runtime asserts for input shape constraints. We do this after all + # placeholder nodes so that we can handle both (unary) predicates and + # (binary) relations. 
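+        # E.g. a range constraint (2 <= s0 <= 10) is a unary predicate on one
+        # placeholder dim, while an equality constraint (s0 == s1) relates the
+        # dims of two placeholders, so all sym_size nodes are created first.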
+        inputdim_to_node: Dict[InputDim, torch.fx.Node] = OrderedDict()
+        for node in graph.nodes:
+            if node.op != "placeholder":
+                continue
+
+            if (
+                "val" not in node.meta or node.meta["val"] is None
+            ):
+                continue
+
+            if not isinstance(node.meta["val"], FakeTensor):
+                # it has to be a prim value
+                self._insert_prim_assert_inplace(graph, node, node.meta["val"])
+            else:
+                fake_tensor_shape = node.meta["val"].shape
+                for dim, shape in enumerate(fake_tensor_shape):
+                    with graph.inserting_after(insert_loc):
+                        dim_node = graph.call_function(
+                            torch.ops.aten.sym_size.int, (node, dim)
+                        )
+                    input_dim = InputDim(node.name, dim)
+                    inputdim_to_node[input_dim] = dim_node
+                    insert_loc = dim_node
+
+                    if isinstance(shape, SymInt):
+                        # If the shape is dynamic, add range assertions
+                        symbol = shape.node._expr
+                        if symbol in self.range_constraints:
+                            self._insert_range_assert_inplace(
+                                graph, input_dim, dim_node, self.range_constraints[symbol]
+                            )
+                    else:
+                        # If no dynamism is specified, we assume all dimensions
+                        # are specialized
+                        assert isinstance(shape, int)
+                        self._insert_specialized_shape_assert_inplace(
+                            graph, input_dim, dim_node, shape,
+                        )
+
+        # Add runtime assertions on equality constraints on the inputs
+        if len(inputdim_to_node) > 0:
+            with graph.inserting_after(
+                list(inputdim_to_node.values())[-1]
+            ):
+                self._insert_equality_assert_inplace(graph, inputdim_to_node)
+
+        return super().call(graph_module)
+
+    def _insert_specialized_shape_assert_inplace(
+        self, graph: torch.fx.Graph, input_dim: InputDim, dim_node: torch.fx.Node, shape: int,
+    ):
+        assert_msg = f"Input {input_dim.input_name}.shape[{input_dim.dim}] is specialized at {shape}"
+        with graph.inserting_after(dim_node):
+            eq_node = graph.call_function(operator.eq, (dim_node, shape))
+        with graph.inserting_after(eq_node):
+            tensor_eq_node = graph.call_function(torch.ops.aten.scalar_tensor.default, (eq_node,))
+        with graph.inserting_after(tensor_eq_node):
+            _ = graph.call_function(torch.ops.aten._assert_async.msg, (tensor_eq_node, assert_msg))
+
+    def _insert_prim_assert_inplace(self, graph, node: torch.fx.Node, value: Any):
+        assert_msg = (
+            f"Input {node.name} is specialized to be {value} at tracing time; "
+            f"passing in a different value at run time is not supported."
+        )
+        with graph.inserting_after(node):
+            eq_node = graph.call_function(operator.eq, (node, value))
+        with graph.inserting_after(eq_node):
+            tensor_eq_node = graph.call_function(torch.ops.aten.scalar_tensor.default, (eq_node,))
+        with graph.inserting_after(tensor_eq_node):
+            _ = graph.call_function(torch.ops.aten._assert_async.msg, (tensor_eq_node, assert_msg))
+
+    def _insert_range_assert_inplace(
+        self, graph: torch.fx.Graph, input_dim: InputDim, dim_node: torch.fx.Node, range: ValueRanges
+    ):
+        """
+        Add runtime asserts for user-specified range constraints for
+        each placeholder's dynamic dimension.
+        """
+
+        min_val, max_val = _convert_range_to_int(range)
+        assert_msg = (
+            f"Input {input_dim.input_name}.shape[{input_dim.dim}] is "
+            f"outside of specified dynamic range [{min_val}, {max_val}]"
+        )
+        # TODO (tmanlaibaatar) we are making an assumption that graph generated for
+        # input dim N >=2 generalizes to N < 2. Ideally we should check that:
+        # 1. if we can generalize to N < 2, not add any assertion saying N >= 2
+        # 2. If we can't generalize to N < 2, add an assertion saying N >= 2
+        # Above can be achieved via a separate pass.
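+        # Accordingly, a lower-bound assert is only emitted below when
+        # min_val > 2; the implicit N >= 2 bound is intentionally not asserted.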
+        with graph.inserting_after(dim_node):
+            if min_val > 2:
+                self._insert_assert_async_inplace(
+                    graph, operator.ge, (dim_node, min_val), assert_msg,
+                )
+
+            if max_val < math.inf:
+                self._insert_assert_async_inplace(
+                    graph, operator.le, (dim_node, max_val), assert_msg,
+                )
+
+    def _insert_equality_assert_inplace(
+        self,
+        graph: torch.fx.Graph,
+        inputdim_to_node: Dict[InputDim, torch.fx.Node],
+    ):
+        for input_dim, other_input_dim in self.equality_constraints:
+            dim_node = inputdim_to_node[input_dim]
+            assert_msg = (
+                f"Input {input_dim.input_name}.shape[{input_dim.dim}] is "
+                f"not equal to input {other_input_dim.input_name}.shape[{other_input_dim.dim}]"
+            )
+
+            other_dim_node = inputdim_to_node[other_input_dim]
+            self._insert_assert_async_inplace(
+                graph,
+                operator.eq,
+                (dim_node, other_dim_node),
+                assert_msg
+            )
+
+    def _insert_assert_async_inplace(self, graph, operator, args, assert_msg):
+        """
+        Inserts assert_async call_function nodes in the graph. This function is
+        called before we run the interpreter-based pass and does an inplace
+        insertion.
+        """
+        cmp_node = graph.call_function(operator, args)
+        with graph.inserting_after(cmp_node):
+            cmp_tensor_node = graph.call_function(
+                torch.ops.aten.scalar_tensor.default, (cmp_node,)
+            )
+        with graph.inserting_after(cmp_tensor_node):
+            _ = graph.call_function(
+                torch.ops.aten._assert_async.msg, (cmp_tensor_node, assert_msg)
+            )
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a2b9c674859f4eefd56033cf37536a1b532ae65
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py
@@ -0,0 +1,66 @@
+import operator
+
+import torch
+
+from torch.export.exported_program import ConstantArgument, TensorArgument
+from torch.fx.passes.infra.pass_base import PassBase, PassResult
+
+__all__ = ["CollectTracepointsPass"]
+
+
+class CollectTracepointsPass(PassBase):
+    """
+    Collects _export_tracepoint nodes into module call signatures and erases them.
+    """
+
+    def __init__(self, specs, sig) -> None:
+        super().__init__()
+        self.specs = specs
+        self.sig = sig
+
+    def call(self, gm):
+        def get_arg_spec(arg):
+            if isinstance(arg, torch.fx.Node):
+                if isinstance(arg.meta.get("val"), torch.Tensor):
+                    return TensorArgument(name=arg.name)
+                else:
+                    raise AssertionError(
+                        "Symint input is not implemented yet for submodule call signature."
+                    )
+            else:
+                return ConstantArgument(value=arg)
+
+        for module in gm.modules():
+            if not isinstance(module, torch.fx.GraphModule):
+                continue
+            for node in module.graph.nodes:
+                if node.op != "call_function":
+                    continue
+                if node.target == torch.ops.higher_order._export_tracepoint:
+                    for i, arg in enumerate(node.args):
+                        kind = node.kwargs["kind"]
+                        if kind == "module_call_inputs":
+                            self.specs[node.kwargs["path"]].inputs.append(
+                                get_arg_spec(arg)
+                            )
+                        elif kind == "module_call_outputs":
+                            self.specs[node.kwargs["path"]].outputs.append(
+                                get_arg_spec(arg)
+                            )
+                        else:
+                            raise AssertionError(f"Unknown tracepoint kind: {kind}")
+                        if isinstance(arg, torch.fx.Node):
+                            for user in node.users:
+                                assert user.op == "call_function"
+                                assert user.target == operator.getitem
+                                assert isinstance(user.args[1], int)
+                                if user.args[1] == i:
+                                    user.replace_all_uses_with(arg)
+                                    self.sig.replace_all_uses(user.name, arg.name)
+                                    break
+                        users = list(node.users)
+                        for user in users:
+                            assert len(user.users) == 0
+                            gm.graph.erase_node(user)
+                    gm.graph.erase_node(node)
+        return PassResult(gm, True)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b0dd4aa05bbf089e0ba29834816fc8814be40ff
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py
@@ -0,0 +1,96 @@
+import copy
+from typing import Dict, Optional, Tuple, List
+
+import torch
+from torch._export.pass_base import _ExportPassBase, PassResult, Argument
+from torch._export.pass_infra.node_metadata import NodeMetadata
+from torch._export.pass_infra.proxy_value import ProxyValue
+from torch._ops import OpOverload
+
+aten = torch.ops.aten
+
+_NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: Dict[OpOverload, OpOverload] = {
+    aten.sym_constrain_range.default: aten._functional_sym_constrain_range,
+    aten._assert_async.msg: aten._functional_assert_async.msg,
+}
+
+
+class _FunctionalizeSideEffectfulOpsPass(_ExportPassBase):
+    """
+    Functionalizes ops with side effects in the graph module by replacing each
+    op with its functional version. A new dependency token (`dep_token`) is
+    created and propagated through the functional ops to the output.
+    For example:
+    ```
+    def f(x):
+        sym_constrain_range(x.shape[0], min=1, max=3)
+        return x.add(3)
+    ```
+    Will be transformed to:
+    ```
+    def f(x):
+        dep_token0 = _make_dep_token()
+        dep_token1 = _functional_sym_constrain_range(
+            x.shape[0], min=1, max=3, dep_token=dep_token0
+        )
+
+        return x.add(3), dep_token1
+    ```
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._dep_token: Optional[ProxyValue] = None
+        self._next_dep_token_index: Optional[int] = None
+
+    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
+        # Early return if no non-functional assertions.
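+        # E.g. aten._assert_async.msg is swapped for
+        # aten._functional_assert_async.msg, per the mapping table above.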
+ if not any( + n.target in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS + for n in graph_module.graph.nodes + ): + return PassResult(graph_module=graph_module, modified=False) + + gm = copy.deepcopy(graph_module) + self._dep_token = None + self._next_dep_token_index = None + return super().call(gm) + + def call_operator( + self, + op: OpOverload, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + meta: NodeMetadata, + ) -> ProxyValue: + if op not in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: + return super().call_operator(op, args, kwargs, meta) + + if self._dep_token is None: + self._dep_token = super().call_operator( + aten._make_dep_token, + args=(), + kwargs={}, + meta=self._create_dummy_node_metadata(), + ) + self._dep_token.node.name = "dep_token0" + self._next_dep_token_index = 1 + + self._dep_token = super().call_operator( + _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS[op], + args=args, + kwargs={**kwargs, "dep_token": self._dep_token}, + meta=meta, + ) + assert self._next_dep_token_index is not None + self._dep_token.node.name = f"dep_token{self._next_dep_token_index}" + self._next_dep_token_index += 1 + + return self._dep_token + + def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue: + assert self._dep_token is not None + + return super().output(results=(*results, self._dep_token), meta=meta) # type: ignore[arg-type] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/lift_constant_tensor_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/lift_constant_tensor_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..8399273660265435a64b82f622c9fb6bf62ce392 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/lift_constant_tensor_pass.py @@ -0,0 +1,70 @@ +from typing import Dict + +import torch +from torch._guards import detect_fake_mode +from torch.export.exported_program import InputKind, InputSpec, TensorArgument + + +def lift_constant_tensor_pass(gm, graph_signature) -> Dict[str, torch.Tensor]: + """ + Takes an ExportedProgram and returns the ExportedProgram modified in-place, + with the constant tensors as buffers. 
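+
+    Concretely (a sketch of the code below): each `get_attr` node holding a
+    constant tensor becomes a placeholder named `_lifted_tensor_constant<N>`,
+    and a matching InputSpec(kind=InputKind.CONSTANT_TENSOR, ...) is inserted
+    into the graph signature just before the first user input.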
+ """ + if len([node for node in gm.graph.nodes if node.op == "placeholder"]) == 0: + return {} + + inputs = graph_signature.input_specs + num_tensor_constants = sum( + input_specs.kind == InputKind.CONSTANT_TENSOR for input_specs in inputs + ) + + fake_mode = detect_fake_mode( + tuple(node.meta["val"] for node in gm.graph.nodes if node.op == "placeholder") + ) + assert fake_mode is not None + + first_user_input_loc, first_user_input = None, None + for i, node in enumerate(gm.graph.nodes): + if node.op == "placeholder" and node.name in graph_signature.user_inputs: + first_user_input = node + first_user_input_loc = i + break + + assert first_user_input is not None and first_user_input_loc is not None + tensor_constants = {} + + for node in gm.graph.nodes: + if node.op == "get_attr": + constant_tensor = getattr(gm, node.target) + if not isinstance(constant_tensor, torch.Tensor): + continue + + constant_tensor_fqn = f"_lifted_tensor_constant{num_tensor_constants}" + num_tensor_constants += 1 + + with gm.graph.inserting_before(first_user_input): + # Insert the constant node before the first user input + const_placeholder_node = gm.graph.placeholder(constant_tensor_fqn) + for k, v in node.meta.items(): + const_placeholder_node.meta[k] = v + const_placeholder_node.meta["val"] = fake_mode.from_tensor( + constant_tensor, static_shapes=True + ) + const_placeholder_node.meta["val"].constant = constant_tensor + node.replace_all_uses_with(const_placeholder_node) + gm.graph.erase_node(node) + + # Add the constant as a buffer to the graph signature + graph_signature.input_specs.insert( + first_user_input_loc, + InputSpec( + kind=InputKind.CONSTANT_TENSOR, + arg=TensorArgument(name=const_placeholder_node.name), + target=constant_tensor_fqn, + ), + ) + tensor_constants[constant_tensor_fqn] = constant_tensor + first_user_input_loc += 1 + + gm.recompile() + return tensor_constants diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py new file mode 100644 index 0000000000000000000000000000000000000000..adcc708e554830b430db0d4374f4494482ce0b39 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py @@ -0,0 +1,26 @@ +import torch +from torch.fx.passes.infra.pass_base import PassBase, PassResult + + +class _RemoveRuntimeAssertionsPass(PassBase): + """ + Remove runtime assertions inserted by the + _AddRuntimeAssertionsForInlineConstraintsPass. 
+ """ + + def call(self, graph_module) -> PassResult: + modified = False + for module in graph_module.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in module.graph.nodes: + if node.target == torch.ops.aten._assert_async.msg: + assert_async_node = node + if len(assert_async_node.users) > 0: + continue + module.graph.erase_node(assert_async_node) + # the upstream scalar_tensor <- {le, ge} <- sym_size + # linear chain of nodes of nodes is removed by the + # downstream dead code elimination + modified = True + return PassResult(graph_module, modified) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..109a96d7b4bd3672660b1271b4d72e7fbb6b982f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/replace_sym_size_ops_pass.py @@ -0,0 +1,18 @@ +from typing import Dict + +import torch + +replacements: Dict[torch._ops.OpOverloadPacket, torch._ops.OpOverload] = { + torch.ops.aten.sym_size: torch.ops.aten.sym_size.int, + torch.ops.aten.sym_stride: torch.ops.aten.sym_stride.int, + torch.ops.aten.sym_numel: torch.ops.aten.sym_numel.default, +} + + +def _replace_sym_size_ops_pass(gm: torch.fx.GraphModule): + for module in gm.modules(): + if not isinstance(module, torch.fx.GraphModule): + continue + for node in module.graph.nodes: + if node.target in replacements: + node.target = replacements[node.target] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..850b8043d905865d690dad8a7a11b6ba37383cdd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/passes/replace_view_ops_with_view_copy_ops_pass.py @@ -0,0 +1,71 @@ +from typing import Dict, Optional, Set + +import torch +from torch._ops import OpOverload, OpOverloadPacket, HigherOrderOperator +from torch._export.error import InternalError +from torch._export.pass_base import _ExportPassBase + + +__all__ = ["ReplaceViewOpsWithViewCopyOpsPass"] + + +_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: Dict[OpOverload, OpOverload] = { + torch.ops.aten._unsafe_view.default: torch.ops.aten.view_copy.default, +} + +# TODO (tmanlaibaatar) remove this after https://github.com/pytorch/pytorch/pull/100749 +_BLACK_LISTED_OPS: Set[OpOverloadPacket] = { + torch.ops.aten.sym_size, + torch.ops.aten.sym_stride, + torch.ops.aten.sym_numel, +} + +def is_view_op(schema: torch._C.FunctionSchema) -> bool: + if len(schema.arguments) == 0: + return False + alias_info = schema.arguments[0].alias_info + return (alias_info is not None) and (not alias_info.is_write) + + +def get_view_copy_of_view_op(schema: torch._C.FunctionSchema) -> Optional[OpOverload]: + if is_view_op(schema) and schema.name.startswith("aten::"): + view_op_name = schema.name.split("::")[1] + view_op_overload = ( + schema.overload_name + if schema.overload_name != "" + else "default" + ) + view_copy_op_name = view_op_name + "_copy" + if not hasattr(torch.ops.aten, view_copy_op_name): + raise InternalError(f"{schema.name} is missing a view_copy variant") + + view_copy_op_overload_packet = getattr(torch.ops.aten, view_copy_op_name) + + if not hasattr(view_copy_op_overload_packet, 
view_op_overload): + raise InternalError(f"{schema.name} is missing a view_copy variant") + + return getattr(view_copy_op_overload_packet, view_op_overload) + + return None + + +class ReplaceViewOpsWithViewCopyOpsPass(_ExportPassBase): + """ + Our backend expects pure functional operators. For efficiency + purposes, we keep view ops around while functionalizing the exported + program. This pass replaces view ops with view copy ops for backends that + need AOT memory planning. + """ + def call_operator(self, op, args, kwargs, meta): + if op in _NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS: + return super().call_operator( + (_NON_FUNCTIONAL_OPS_TO_FUNCTIONAL_OPS[op]), args, kwargs, meta + ) + + if op in _BLACK_LISTED_OPS or isinstance(op, HigherOrderOperator): + return super().call_operator(op, args, kwargs, meta) + + if view_copy_op := get_view_copy_of_view_op(op._schema): + return super().call_operator(view_copy_op, args, kwargs, meta) + + return super().call_operator(op, args, kwargs, meta) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4515196f15caa5155d61eb5f26e9ab7847af7480 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d35f353f85bfd70ad73cd37b249a1d58a85ba154 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/__pycache__/schema.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/schema.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d92369096ab6c6a4d9f27733758f7716315219 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/schema.py @@ -0,0 +1,346 @@ +# NOTE: This is a placeholder for iterating on export serialization schema design. +# Anything is subject to change and no guarantee is provided at this point. + +from dataclasses import dataclass, fields +from enum import IntEnum +from typing import Dict, List, Optional, Tuple + + +# NOTE: Please update this value if any modifications are made to the schema +SCHEMA_VERSION = 2 +TREESPEC_VERSION = 1 + +# TODO (zhxchen17) Move to a separate file. 
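+# The _Union base class below emulates a tagged union over dataclass fields:
+# exactly one field may be non-None.  Roughly,
+#     SymInt.create(as_int=3)         # -> SymInt(as_expr=None, as_int=3)
+#     SymInt.create(as_int=3).type    # -> "as_int"
+#     SymInt.create(as_int=3).value   # -> 3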
+class _Union:
+    @classmethod
+    def create(cls, **kwargs):
+        assert len(kwargs) == 1
+        return cls(**{**{f.name: None for f in fields(cls)}, **kwargs})  # type: ignore[arg-type]

+    def __post_init__(self):
+        assert sum(1 for f in fields(self) if getattr(self, f.name) is not None) == 1  # type: ignore[arg-type, misc]

+    @property
+    def value(self):
+        val = next((getattr(self, f.name) for f in fields(self) if getattr(self, f.name) is not None), None)  # type: ignore[arg-type]
+        assert val is not None
+        return val

+    @property
+    def type(self):
+        val_type = next((f.name for f in fields(self) if getattr(self, f.name) is not None), None)  # type: ignore[arg-type]
+        assert val_type is not None
+        return val_type

+    def __str__(self):
+        return self.__repr__()

+    def __repr__(self):
+        return f"{type(self).__name__}({self.type}={self.value})"


+class ScalarType(IntEnum):
+    UNKNOWN = 0
+    BYTE = 1
+    CHAR = 2
+    SHORT = 3
+    INT = 4
+    LONG = 5
+    HALF = 6
+    FLOAT = 7
+    DOUBLE = 8
+    COMPLEXHALF = 9
+    COMPLEXFLOAT = 10
+    COMPLEXDOUBLE = 11
+    BOOL = 12
+    BFLOAT16 = 13


+class Layout(IntEnum):
+    Unknown = 0
+    SparseCoo = 1
+    SparseCsr = 2
+    SparseCsc = 3
+    SparseBsr = 4
+    SparseBsc = 5
+    _mkldnn = 6
+    Strided = 7


+class MemoryFormat(IntEnum):
+    Unknown = 0
+    ContiguousFormat = 1
+    ChannelsLast = 2
+    ChannelsLast3d = 3
+    PreserveFormat = 4


+@dataclass
+class Device:
+    type: str
+    index: Optional[int]


+@dataclass(repr=False)
+class SymExprHint(_Union):
+    as_int: int
+    as_float: float
+    as_bool: bool


+# This is for storing the symbolic expressions behind symints/symfloats/symbools
+# For example, we can get something like
+# SymExpr(expr_str="s0 + s1", hint=SymExprHint(as_int=4))
+# if we also have the hint that s0 and s1 are both 2.
+@dataclass
+class SymExpr:
+    expr_str: str
+    hint: Optional[SymExprHint] = None


+@dataclass(repr=False)
+class SymInt(_Union):
+    as_expr: SymExpr
+    as_int: int


+@dataclass(repr=False)
+class SymBool(_Union):
+    as_expr: SymExpr
+    as_bool: bool


+@dataclass
+class TensorMeta:
+    dtype: ScalarType
+    sizes: List[SymInt]
+    requires_grad: bool
+    device: Device
+    strides: List[SymInt]
+    storage_offset: int
+    layout: Layout


+# In most cases we will use the "as_name" field to store arguments which are
+# SymInts.
+# The "as_int" field is used in the case where we have a list containing a mix
+# of SymInt and ints (ex. [1, s0, ...]). We will serialize this type of list to
+# be List[SymIntArgument] and map the SymInts to the "as_name" field, and ints
+# to the "as_int" field.
+@dataclass(repr=False)
+class SymIntArgument(_Union):
+    as_name: str
+    as_int: int


+# In most cases we will use the "as_name" field to store arguments which are
+# SymBools.
+# The "as_bool" field is used in the case where we have a list containing a mix
+# of SymBool and bools (ex. [True, i0, ...]). We will serialize this type of list to
+# be List[SymBoolArgument] and map the SymBools to the "as_name" field, and bools
+# to the "as_bool" field.
+@dataclass(repr=False)
+class SymBoolArgument(_Union):
+    as_name: str
+    as_bool: bool


+@dataclass
+class TensorArgument:
+    name: str


+# This is used for storing the contents of a list which contains optional tensors
+# (Tensor?[], ex. [Tensor, None, ...]), where the list will be serialized to the
+# type List[OptionalTensorArgument], with tensor values serialized to the
+# "as_tensor" field, and None values serialized to the "as_none" field.
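+# E.g. a Tensor?[] argument [t, None] serializes roughly as
+# [OptionalTensorArgument(as_tensor="t"), OptionalTensorArgument(as_none=())].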
+@dataclass(repr=False)
+class OptionalTensorArgument(_Union):
+    as_tensor: str
+    as_none: Tuple[()]


+@dataclass
+class GraphArgument:
+    name: str
+    graph: 'Graph'


+@dataclass
+class CustomObjArgument:
+    name: str


+# This is actually a union type
+@dataclass(repr=False)
+class Argument(_Union):
+    as_none: Tuple[()]
+    as_tensor: TensorArgument
+    as_tensors: List[TensorArgument]
+    as_int: int
+    as_ints: List[int]
+    as_float: float
+    as_floats: List[float]
+    as_string: str
+    as_strings: List[str]
+    as_sym_int: SymIntArgument
+    as_sym_ints: List[SymIntArgument]
+    as_scalar_type: ScalarType
+    as_memory_format: MemoryFormat
+    as_layout: Layout
+    as_device: Device
+    as_bool: bool
+    as_bools: List[bool]
+    as_sym_bool: SymBoolArgument
+    as_sym_bools: List[SymBoolArgument]
+    as_graph: GraphArgument
+    as_optional_tensors: List[OptionalTensorArgument]
+    as_custom_obj: CustomObjArgument


+@dataclass
+class NamedArgument:
+    # Argument name from the operator schema
+    name: str
+    arg: Argument


+@dataclass
+class Node:
+    target: str
+    inputs: List[NamedArgument]
+    outputs: List[Argument]
+    metadata: Dict[str, str]


+@dataclass
+class Graph:
+    inputs: List[Argument]
+    outputs: List[Argument]
+    nodes: List[Node]
+    tensor_values: Dict[str, TensorMeta]
+    sym_int_values: Dict[str, SymInt]
+    sym_bool_values: Dict[str, SymBool]
+    # This is for deserializing the submodule graphs from higher order ops
+    # (ex. cond, map) where single tensor returns will just return a single
+    # tensor, rather than following export schema and returning a singleton
+    # list.
+    is_single_tensor_return: bool = False


+@dataclass
+class UserInputSpec:
+    # Actually, only tensors and SymInts are allowed here
+    arg: Argument


+@dataclass
+class InputToParameterSpec:
+    arg: TensorArgument
+    parameter_name: str


+@dataclass
+class InputToBufferSpec:
+    arg: TensorArgument
+    buffer_name: str


+@dataclass
+class InputToTensorConstantSpec:
+    arg: TensorArgument
+    tensor_constant_name: str


+@dataclass
+class InputSpec(_Union):
+    user_input: UserInputSpec
+    parameter: InputToParameterSpec
+    buffer: InputToBufferSpec
+    tensor_constant: InputToTensorConstantSpec


+@dataclass
+class UserOutputSpec:
+    arg: Argument


+@dataclass
+class LossOutputSpec:
+    arg: TensorArgument


+@dataclass
+class BufferMutationSpec:
+    arg: TensorArgument
+    buffer_name: str


+@dataclass
+class GradientToParameterSpec:
+    arg: TensorArgument
+    parameter_name: str


+@dataclass
+class GradientToUserInputSpec:
+    arg: TensorArgument
+    user_input_name: str


+@dataclass
+class OutputSpec(_Union):
+    user_output: UserOutputSpec
+    loss_output: LossOutputSpec
+    buffer_mutation: BufferMutationSpec
+    gradient_to_parameter: GradientToParameterSpec
+    gradient_to_user_input: GradientToUserInputSpec


+@dataclass
+class GraphSignature:
+    input_specs: List[InputSpec]
+    output_specs: List[OutputSpec]


+@dataclass
+class RangeConstraint:
+    min_val: int
+    max_val: int


+@dataclass
+class ModuleCallSignature:
+    inputs: List[Argument]
+    outputs: List[Argument]

+    # These are serialized by calling pytree.treespec_dumps
+    # and deserialized by calling pytree.treespec_loads
+    in_spec: str
+    out_spec: str


+@dataclass
+class ModuleCallEntry:
+    fqn: str
+    signature: Optional[ModuleCallSignature] = None


+@dataclass
+class GraphModule:
+    graph: Graph
+    signature: GraphSignature
+    # This is used for unflattening, by tracking the calling structure of all of
+    # the modules in order to unflatten the modules back to the eager calling
+    #
conventions. + module_call_graph: List[ModuleCallEntry] + + +@dataclass +class ExportedProgram: + graph_module: GraphModule + # Key is the opset namespace (ex. aten), and value is the version number + opset_version: Dict[str, int] + range_constraints: Dict[str, RangeConstraint] + schema_version: int + dialect: str diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/serialize.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/serialize.py new file mode 100644 index 0000000000000000000000000000000000000000..34b7bcbe782000a57b7fa3fb23da72d5f8357d83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/serialize.py @@ -0,0 +1,1722 @@ +import base64 +import dataclasses +import io +import json +import logging +import math +import operator +import typing + +from contextlib import contextmanager +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable, cast, Dict, Iterator, List, Optional, Union + +import sympy + +import torch +import torch.export.exported_program as ep +from torch._export.verifier import load_verifier +from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode +from torch.fx.experimental import symbolic_shapes +from torch.utils._pytree import treespec_dumps, treespec_loads +from torch.utils._sympy.value_ranges import ValueRanges + +from .schema import ( # type: ignore[attr-defined] + _Union, + Argument, + BufferMutationSpec, + CustomObjArgument, + Device, + ExportedProgram, + GradientToParameterSpec, + GradientToUserInputSpec, + Graph, + GraphArgument, + GraphModule, + GraphSignature, + InputSpec, + InputToBufferSpec, + InputToParameterSpec, + InputToTensorConstantSpec, + Layout, + LossOutputSpec, + MemoryFormat, + ModuleCallEntry, + ModuleCallSignature, + NamedArgument, + Node, + OptionalTensorArgument, + OutputSpec, + RangeConstraint, + ScalarType, + SCHEMA_VERSION, + SymBool, + SymBoolArgument, + SymExpr, + SymExprHint, + SymInt, + SymIntArgument, + TensorArgument, + TensorMeta, + TREESPEC_VERSION, + UserInputSpec, + UserOutputSpec, +) + + +__all__ = [ + "serialize", + "GraphModuleSerializer", + "ExportedProgramSerializer", + "GraphModuleDeserializer", + "ExportedProgramDeserializer", +] + +from torch.export.exported_program import ( + ConstantArgument as PyConstantArgument, + SymIntArgument as PySymIntArgument, + TensorArgument as PyTensorArgument, +) + +from .upgrade import GraphModuleOpUpgrader + +log = logging.getLogger(__name__) + + +class SerializeError(RuntimeError): + pass + + +def _reverse_map(d: Dict[Any, Enum]): + return {v.value: k for k, v in d.items()} + + +MetaType = Union[FakeTensor, int, torch.SymInt, bool, torch.SymBool] + + +ST_DELIMITER = ";" + +_TORCH_TO_SERIALIZE_DTYPE = { + torch.uint8: ScalarType.BYTE, + torch.int8: ScalarType.CHAR, + torch.int16: ScalarType.SHORT, + torch.int32: ScalarType.INT, + torch.int64: ScalarType.LONG, + torch.float16: ScalarType.HALF, + torch.float32: ScalarType.FLOAT, + torch.float64: ScalarType.DOUBLE, + torch.complex32: ScalarType.COMPLEXHALF, + torch.complex64: ScalarType.COMPLEXFLOAT, + torch.complex128: ScalarType.COMPLEXDOUBLE, + torch.bool: ScalarType.BOOL, + torch.bfloat16: ScalarType.BFLOAT16 +} + + +_SERIALIZE_TO_TORCH_DTYPE = _reverse_map(_TORCH_TO_SERIALIZE_DTYPE) # type: ignore[arg-type] + + +_TORCH_TO_SERIALIZE_LAYOUT = { + torch.sparse_coo: Layout.SparseCoo, + torch.sparse_csr: Layout.SparseCsr, + torch.sparse_csc: Layout.SparseCsc, + torch.sparse_bsr: Layout.SparseBsr, + torch.sparse_bsc: 
Layout.SparseBsc, + torch._mkldnn: Layout._mkldnn, # type: ignore[attr-defined] + torch.strided: Layout.Strided, +} + + +_SERIALIZE_TO_TORCH_LAYOUT = _reverse_map(_TORCH_TO_SERIALIZE_LAYOUT) # type: ignore[arg-type] + + +_TORCH_TO_SERIALIZE_MEMORY_FORMAT = { + torch.contiguous_format: MemoryFormat.ContiguousFormat, + torch.channels_last: MemoryFormat.ChannelsLast, + torch.channels_last_3d: MemoryFormat.ChannelsLast3d, + torch.preserve_format: MemoryFormat.PreserveFormat, +} + + +_SERIALIZE_TO_TORCH_MEMORY_FORMAT = _reverse_map(_TORCH_TO_SERIALIZE_MEMORY_FORMAT) # type: ignore[arg-type] + + +_SYM_INT_OPS = { + operator.mul, + operator.add, + operator.sub, + operator.floordiv, + operator.mod, +} + + +_SYM_BOOL_OPS = { + operator.eq, + operator.ne, + operator.le, + operator.ge, + operator.lt, + operator.gt, +} + + +@dataclass +class SerializedArtifact: + exported_program: Union[ExportedProgram, bytes] + state_dict: bytes + constants: bytes + + +def deserialize_device(d: Device) -> torch.device: + if d.index is None: + return torch.device(type=d.type) # type: ignore[call-overload] + return torch.device(type=d.type, index=d.index) + + +def serialize_sym_int(s: Union[int, torch.SymInt]) -> SymInt: + if isinstance(s, (torch.SymInt, int)): + if symbolic_shapes.is_concrete_int(s): + return SymInt.create(as_int=int(s)) + else: + assert isinstance(s, torch.SymInt) + if s.node.hint is None: + return SymInt.create(as_expr=SymExpr(str(s))) + else: + return SymInt.create(as_expr=SymExpr(str(s), hint=SymExprHint.create(as_int=s.node.hint))) + else: + raise SerializeError( + f"SymInt should be either symbol or int, got `{s}` of type `{type(s)}`" + ) + + +def serialize_sym_bool(s: Union[bool, torch.SymBool]) -> SymBool: + if isinstance(s, (torch.SymBool, bool)): + if symbolic_shapes.is_concrete_bool(s): + return SymBool.create(as_bool=bool(s)) + else: + return SymBool.create(as_expr=SymExpr(expr_str=str(s))) + else: + raise SerializeError( + f"SymBool should be either symbol or bool, got `{s}` of type `{type(s)}`" + ) + + +def serialize_tensor_meta(t: torch.Tensor) -> TensorMeta: + """ + Extract a TensorMeta describing `t`. + """ + return TensorMeta( + dtype=_TORCH_TO_SERIALIZE_DTYPE[t.dtype], + sizes=[serialize_sym_int(s) for s in t.shape], + requires_grad=t.requires_grad, + device=Device(type=t.device.type, index=t.device.index), + strides=[serialize_sym_int(s) for s in t.stride()], + storage_offset=0, + layout=_TORCH_TO_SERIALIZE_LAYOUT[t.layout], + ) + + +def serialize_torch_artifact(artifact) -> bytes: + buffer = io.BytesIO() + # This is a workaround for backend's tensor deserialization problem: + # unpickleTensor() always create a tensor on the device where it was originally saved + # This behavior is bad for multi-gpu training, as we wish to directly load the tensor + # on the designated device. + # For now, we simply move the tensor to cpu before saving. + # TODO: this should be fixed by deserialization instead. 
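+    # The inverse helper (deserialize_torch_artifact, defined below) simply
+    # torch.load()s these bytes, so e.g. a state dict round-trips through
+    # serialize_torch_artifact / deserialize_torch_artifact unchanged,
+    # modulo the device caveat described above.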
+ torch.save(artifact, buffer) + return buffer.getvalue() + + +def deserialize_torch_artifact(serialized: bytes): + if len(serialized) == 0: + return {} + buffer = io.BytesIO(serialized) + buffer.seek(0) + return torch.load(buffer) + + +def _sympy_int_to_int(val: sympy.Expr): + # Convert simple sympy Integers into concrete int + if val == sympy.oo: + return math.inf + if val == -sympy.oo: + return -math.inf + if isinstance(val, sympy.Integer): + return int(val) + raise RuntimeError( + "Export constraints cannot be non-integer expressions" + ) + + +def _int_to_sympy_int(val) -> sympy.Expr: + # Convert concrete int into simple sympy Integers + if val == math.inf: + return sympy.oo + if val == -math.inf: + return -sympy.oo + return sympy.Integer(val) + + +def serialize_range_constraints( + range_constraints: Dict[sympy.Symbol, ValueRanges] +) -> Dict[str, RangeConstraint]: + return { + str(k): RangeConstraint( + _sympy_int_to_int(v.lower), # type: ignore[arg-type] + _sympy_int_to_int(v.upper), # type: ignore[arg-type] + ) + for k, v in range_constraints.items() + } + + +def _is_single_tensor_return(target: torch._ops.OpOverload) -> bool: + returns = target._schema.returns + return len(returns) == 1 and isinstance(returns[0].real_type, torch.TensorType) + + +def _is_single_tensor_list_return(target: torch._ops.OpOverload) -> bool: + returns = target._schema.returns + if len(returns) != 1: + return False + return_type = returns[0].real_type + return isinstance(return_type, torch.ListType) and isinstance( + return_type.getElementType(), torch.TensorType + ) + + +@dataclass +class GraphState: + inputs: List[Argument] = field(default_factory=list) + outputs: List[Argument] = field(default_factory=list) + nodes: List[Node] = field(default_factory=list) + tensor_values: Dict[str, TensorMeta] = field(default_factory=dict) + sym_int_values: Dict[str, SymInt] = field(default_factory=dict) + sym_bool_values: Dict[str, SymBool] = field(default_factory=dict) + is_single_tensor_return: bool = False + + +class GraphModuleSerializer: + def __init__( + self, + graph_signature: ep.ExportGraphSignature, + module_call_graph: List[ep.ModuleCallEntry] + ): + self.graph_state = GraphState() + self.graph_signature = graph_signature + self.module_call_graph = module_call_graph + self.custom_objs: Dict[str, torch._C.ScriptObject] = {} + + @contextmanager + def save_graph_state(self): + saved = self.graph_state + self.graph_state = GraphState() + try: + yield + finally: + self.graph_state = saved + + def handle_placeholder(self, node: torch.fx.Node): + assert node.op == "placeholder" + if isinstance(node.meta['val'], torch.Tensor): + graph_input = Argument.create(as_tensor=TensorArgument(name=node.name)) + self.graph_state.tensor_values[node.name] = serialize_tensor_meta(node.meta["val"]) + elif isinstance(node.meta['val'], torch.SymInt): + raise AssertionError("SymInt graph input is not implemented yet.") + elif isinstance(node.meta['val'], (int, bool, str, float, type(None))): + graph_input = self.serialize_input(node.meta['val']) + else: + raise AssertionError(f"Unimplemented graph input type: {node.meta['val']}") + self.graph_state.inputs.append(graph_input) + + def handle_output(self, node: torch.fx.Node): + assert node.op == "output" + assert len(node.args) == 1, "FX.Node's args should have one arg" + node_args = node.args[0] + if isinstance(node_args, torch.fx.Node): + # For singleton tensor returns + self.graph_state.is_single_tensor_return = True + self.graph_state.outputs = [self.serialize_input(node_args)] + 
else: + assert isinstance(node_args, (tuple, list)) + self.graph_state.outputs = [self.serialize_input(arg) for arg in node_args] + + def serialize_operator(self, target) -> str: + if isinstance(target, str): + return target + elif target.__module__.startswith("torch._ops"): + # TODO(zhxchen17) Maybe provide a function name helper in FX. + # From torch.fx.node._get_qualified_name + module = target.__module__.replace("torch._ops", "torch.ops") + return f"{module}.{target.__name__}" + else: # TODO(zhxchen17) Don't catch all here. + return f"{target.__module__}.{target.__name__}" + + def handle_call_function(self, node: torch.fx.Node): + assert node.op == "call_function" + + # getitem has been handled in the producer node, skip it here + if node.target is operator.getitem: + return + + if node.target in _SYM_INT_OPS: + assert len(node.kwargs) == 0 + meta_val = node.meta["val"] + ex_node = Node( + target=self.serialize_operator(node.target), + inputs=self.serialize_sym_op_inputs(node.args), + outputs=[Argument.create(as_sym_int=self.serialize_sym_int_output(node.name, meta_val))], + metadata=self.serialize_metadata(node), + ) + elif node.target in _SYM_BOOL_OPS: + assert len(node.kwargs) == 0 + meta_val = node.meta["val"] + ex_node = Node( + target=self.serialize_operator(node.target), + inputs=self.serialize_sym_op_inputs(node.args), + outputs=[Argument.create(as_sym_bool=self.serialize_sym_bool_output(node.name, meta_val))], + metadata=self.serialize_metadata(node), + ) + elif isinstance(node.target, torch._ops.OpOverload): + ex_node = Node( + target=self.serialize_operator(node.target), + inputs=self.serialize_inputs(node.target, node.args, node.kwargs), + outputs=self.serialize_outputs(node), + # TODO: create a new tensor_values here, meta might have faketensor info + metadata=self.serialize_metadata(node), + ) + elif isinstance(node.target, torch._ops.HigherOrderOperator): + + inputs = [ + NamedArgument( + name="", # TODO(zhxchen17) This is sad, should be improved when HOO has schema arg names. + arg=self.serialize_input(a), + ) for a in node.args + ] + + meta_val = node.meta["val"] + + if isinstance(meta_val, torch.Tensor): + outputs = [Argument.create(as_tensor=self.serialize_tensor_output(node.name, meta_val))] + elif isinstance(meta_val, (list, tuple)) and all(isinstance(v, torch.Tensor) for v in meta_val): + arg_list = self._handle_getitem_users(node) + outputs = [Argument.create(as_tensors=arg_list)] + else: + raise SerializeError( + "Only single tensor output or list of tensor output " + "is supported for HigherOrderOperator serialization" + ) + + ex_node = Node( + target=self.serialize_operator(node.target), + inputs=inputs, + outputs=outputs, + metadata=self.serialize_metadata(node), + ) + else: + raise SerializeError(f"Serializing {node.target} is not supported") + + self.graph_state.nodes.append(ex_node) + + def handle_get_attr(self, node): + pass + + def serialize_metadata(self, node: torch.fx.Node) -> Dict[str, str]: + ret = {} + if stack_trace := node.meta.get("stack_trace"): + ret["stack_trace"] = stack_trace + + if nn_module_stack := node.meta.get("nn_module_stack"): + def export_nn_module_stack(val): + assert isinstance(val, tuple) and len(val) == 2 + path, ty = val + + assert isinstance(path, str) + normalized_ty = ty.__module__ + "." 
+ ty.__qualname__ + return path + "," + normalized_ty + + # Serialize to "key,orig_path,type_str" + nn_module_list = [ + f"{k},{export_nn_module_stack(v)}" + for k, v in nn_module_stack.items() + ] + ret["nn_module_stack"] = ST_DELIMITER.join(nn_module_list) + + if source_fn_st := node.meta.get("source_fn_stack"): + source_fn_list = [f"{source_fn[0]},{self.serialize_operator(source_fn[1])}" for source_fn in source_fn_st] + ret["source_fn_stack"] = ST_DELIMITER.join(source_fn_list) + + return ret + + def serialize_sym_op_inputs(self, args) -> List[NamedArgument]: + serialized_args = [] + args_names = ["a", "b"] + for args_name, arg in zip(args_names, args): + serialized_args.append( + NamedArgument(name=args_name, arg=self.serialize_input(arg)) + ) + return serialized_args + + def serialize_inputs( + self, target: torch._ops.OpOverload, args, kwargs=None + ) -> List[NamedArgument]: + assert isinstance(target, torch._ops.OpOverload) + kwargs = kwargs or {} + serialized_args = [] + for i, schema_arg in enumerate(target._schema.arguments): + if schema_arg.name in kwargs: + serialized_args.append( + NamedArgument( + name=schema_arg.name, + arg=self.serialize_input(kwargs[schema_arg.name]), + ) + ) + elif not schema_arg.kwarg_only and i < len(args): + serialized_args.append( + NamedArgument( + name=schema_arg.name, + arg=self.serialize_input(args[i]), + ) + ) + else: + # We intentionally don't serialize the missing arguments + # with default values + pass + + + return serialized_args + + def is_sym_int_arg(self, arg) -> bool: + return isinstance(arg, int) or ( + isinstance(arg, torch.fx.Node) and arg.name in self.graph_state.sym_int_values + ) + + def is_sym_bool_arg(self, arg) -> bool: + return isinstance(arg, bool) or ( + isinstance(arg, torch.fx.Node) and arg.name in self.graph_state.sym_bool_values + ) + + def serialize_input(self, arg) -> Argument: + import torch._inductor.ir as inductor_ir + inductor_tensor_buffers = ( + inductor_ir.Buffer, + inductor_ir.ReinterpretView, + ) + + if isinstance(arg, torch.fx.Node): + if arg.op == "get_attr": + assert isinstance(arg.target, str) + attr = getattr(arg.graph.owning_module, arg.target) + + if isinstance(attr, torch.Tensor): + raise SerializeError("getattr nodes containing tensors should not appear in the graph") + elif isinstance(attr, torch.fx.GraphModule): + with self.save_graph_state(): + graph = self.serialize_graph(attr) + return Argument.create(as_graph=GraphArgument(name=arg.target, graph=graph)) + else: + raise SerializeError(f"Unsupported getattr attribute {arg.target} with type: {type(attr)}") + elif self.is_sym_int_arg(arg): + return Argument.create(as_sym_int=SymIntArgument.create(as_name=arg.name)) + elif self.is_sym_bool_arg(arg): + return Argument.create(as_sym_bool=SymBoolArgument.create(as_name=arg.name)) + else: + return Argument.create(as_tensor=TensorArgument(name=arg.name)) + elif isinstance(arg, inductor_tensor_buffers): + # Other branches are for arguments in fx node. + # This is a special branch for handling buffers (representing tensor arguments) + # for inductor's ExternalFallbackNode + # export_extern_kernel_node() is using this function to serialize arguments + arg_name = arg.get_name() + assert arg_name is not None, "Buffer must have valid name" + return Argument.create(as_tensor=TensorArgument(name=arg_name)) + elif isinstance(arg, torch.SymInt): + # This is a special branch for handling SymInt args in inductor's + # ExternalFallbackNode. 
+ # For regular FX graph, SymInt arg should be a fx.Node with + # self.is_sym_int_arg(arg) being true + return Argument.create(as_sym_int=SymIntArgument.create(as_name=str(arg))) + elif isinstance(arg, bool): + return Argument.create(as_bool=arg) + elif isinstance(arg, str): + return Argument.create(as_string=arg) + elif isinstance(arg, int): + return Argument.create(as_int=arg) + elif isinstance(arg, float): + return Argument.create(as_float=arg) + elif arg is None: + return Argument.create(as_none=()) + elif isinstance(arg, (list, tuple)): + # Must check bool first, as bool is also treated as int + if all(isinstance(a, bool) for a in arg): + return Argument.create(as_bools=list(arg)) + elif all(isinstance(a, int) for a in arg): + return Argument.create(as_ints=list(arg)) + elif all(isinstance(a, float) for a in arg): + return Argument.create(as_floats=list(arg)) + elif all(isinstance(a, str) for a in arg): + return Argument.create(as_strings=list(arg)) + elif all(isinstance(a, torch.SymInt) for a in arg): + # This is a special branch for handling SymInt args in inductor's + # ExternalFallbackNode. + # For regular FX graph, SymInt arg should be a fx.Node with + # self.is_sym_int_arg(arg) being true + return Argument.create( + as_sym_ints=[SymIntArgument.create(as_name=str(a)) for a in arg] + ) + elif all(self.is_sym_int_arg(a) for a in arg): + # list of sym_ints + values = [] + for a in arg: + if isinstance(a, torch.fx.Node): + values.append(SymIntArgument.create(as_name=a.name)) + elif isinstance(a, int): + values.append(SymIntArgument.create(as_int=a)) + return Argument.create(as_sym_ints=values) + elif all(self.is_sym_bool_arg(a) for a in arg): + # list of sym_bools + values = [] + for a in arg: + if isinstance(a, torch.fx.Node): + values.append(SymBoolArgument.create(as_name=a.name)) + elif isinstance(a, bool): + values.append(SymBoolArgument.create(as_bool=a)) + return Argument.create(as_sym_bools=values) + elif all(isinstance(a, torch.fx.Node) for a in arg): + # list of tensors + arguments = [] + for a in arg: + if a.op == "get_attr": + raise SerializeError("getattr nodes containing tensors should not appear in the graph") + arguments.append(TensorArgument(name=a.name)) + return Argument.create(as_tensors=arguments) + elif all(isinstance(a, (torch.fx.Node, type(None))) for a in arg): + # list of optional tensors + def serialize_optional_tensor_args(a): + if a is None: + return OptionalTensorArgument.create(as_none=()) + elif isinstance(a, torch.fx.Node): + return OptionalTensorArgument.create(as_tensor=a.name) + else: + raise SerializeError(f"Unsupported list/tuple argument: {a}") + return Argument.create( + as_optional_tensors=list(map(serialize_optional_tensor_args, arg)) + ) + elif all(isinstance(a, inductor_tensor_buffers) for a in arg): + # list of inductor buffers + return Argument.create( + as_tensors=[TensorArgument(name=a.get_name()) for a in arg], + ) + elif all(isinstance(a, (*inductor_tensor_buffers, type(None))) for a in arg): + # list of inductor buffers as optional tensors + def serialize_optional_tensor_args(a): + if a is None: + return OptionalTensorArgument.create(as_none=()) + elif isinstance(a, inductor_tensor_buffers): + return OptionalTensorArgument.create(as_tensor=a.get_name()) + else: + raise SerializeError(f"Unsupported list/tuple argument: {a}") + return Argument.create( + as_optional_tensors=list(map(serialize_optional_tensor_args, arg)) + ) + else: + raise SerializeError(f"Unsupported list/tuple argument type: {[type(a) for a in arg]}") + elif 
isinstance(arg, torch.dtype):
+            return Argument.create(as_scalar_type=_TORCH_TO_SERIALIZE_DTYPE[arg])
+        elif isinstance(arg, torch.device):
+            return Argument.create(as_device=Device(type=arg.type, index=arg.index))
+        elif isinstance(arg, torch.memory_format):
+            return Argument.create(as_memory_format=_TORCH_TO_SERIALIZE_MEMORY_FORMAT[arg])
+        elif isinstance(arg, torch.layout):
+            return Argument.create(as_layout=_TORCH_TO_SERIALIZE_LAYOUT[arg])
+        elif isinstance(arg, torch._C.ScriptObject):
+            if not (
+                arg._has_method("__getstate__") and  # type: ignore[attr-defined]
+                arg._has_method("__setstate__")  # type: ignore[attr-defined]
+            ):
+                raise SerializeError(
+                    f"Unable to serialize custom class {arg}. Please define "
+                    "serialization methods via def_pickle()."
+                )
+            # Custom objects through torchbind are serializable with pickle,
+            # through implementing the .def_pickle function. This should result
+            # in the object containing a __getstate__ and __setstate__
+            # serialize/deserialize function.
+            custom_obj_name = f"_custom_obj_{len(self.custom_objs)}"
+            self.custom_objs[custom_obj_name] = arg
+            return Argument.create(as_custom_obj=CustomObjArgument(custom_obj_name))
+        else:
+            raise SerializeError(f"Unsupported argument type: {type(arg)}")

+    def serialize_tensor_output(self, name, meta_val) -> TensorArgument:
+        assert name not in self.graph_state.tensor_values
+        self.graph_state.tensor_values[name] = serialize_tensor_meta(meta_val)
+        return TensorArgument(name=name)

+    def serialize_sym_int_output(self, name, meta_val) -> SymIntArgument:
+        assert name not in self.graph_state.sym_int_values
+        self.graph_state.sym_int_values[name] = serialize_sym_int(meta_val)
+        return SymIntArgument.create(as_name=name)

+    def serialize_sym_bool_output(self, name, meta_val) -> SymBoolArgument:
+        assert name not in self.graph_state.sym_bool_values
+        self.graph_state.sym_bool_values[name] = serialize_sym_bool(meta_val)
+        return SymBoolArgument.create(as_name=name)

+    def serialize_input_spec(self, spec: ep.InputSpec) -> InputSpec:
+        if spec.kind == ep.InputKind.USER_INPUT:
+            return InputSpec.create(
+                user_input=UserInputSpec(
+                    arg=self.serialize_argument_spec(spec.arg)
+                )
+            )
+        elif spec.kind == ep.InputKind.PARAMETER:
+            assert spec.target is not None
+            assert isinstance(spec.arg, ep.TensorArgument)
+            return InputSpec.create(
+                parameter=InputToParameterSpec(
+                    arg=TensorArgument(name=spec.arg.name),
+                    parameter_name=spec.target,
+                )
+            )
+        elif spec.kind == ep.InputKind.BUFFER:
+            assert spec.target is not None
+            assert isinstance(spec.arg, ep.TensorArgument)
+            return InputSpec.create(
+                buffer=InputToBufferSpec(
+                    arg=TensorArgument(name=spec.arg.name),
+                    buffer_name=spec.target,
+                )
+            )
+        elif spec.kind == ep.InputKind.CONSTANT_TENSOR:
+            assert spec.target is not None
+            assert isinstance(spec.arg, ep.TensorArgument)
+            return InputSpec.create(
+                tensor_constant=InputToTensorConstantSpec(
+                    arg=TensorArgument(name=spec.arg.name),
+                    tensor_constant_name=spec.target,
+                )
+            )
+        else:
+            raise AssertionError(f"Unknown argument kind: {spec}")

+    def serialize_output_spec(self, spec: ep.OutputSpec) -> OutputSpec:
+        if spec.kind == ep.OutputKind.USER_OUTPUT:
+            return OutputSpec.create(
+                user_output=UserOutputSpec(
+                    arg=self.serialize_argument_spec(spec.arg)
+                )
+            )
+        elif spec.kind == ep.OutputKind.LOSS_OUTPUT:
+            assert isinstance(spec.arg, ep.TensorArgument)
+            return OutputSpec.create(
+                loss_output=LossOutputSpec(
+                    arg=TensorArgument(name=spec.arg.name)
+                )
+            )
+        elif spec.kind == ep.OutputKind.BUFFER_MUTATION:
+            assert spec.target is not None
+            assert isinstance(spec.arg, PyTensorArgument)
+            return OutputSpec.create(
+                buffer_mutation=BufferMutationSpec(
+                    arg=TensorArgument(name=spec.arg.name),
+                    buffer_name=spec.target,
+                )
+            )
+        elif spec.kind == ep.OutputKind.GRADIENT_TO_PARAMETER:
+            assert spec.target is not None
+            assert isinstance(spec.arg, PyTensorArgument)
+            return OutputSpec.create(
+                gradient_to_parameter=GradientToParameterSpec(
+                    arg=TensorArgument(name=spec.arg.name),
+                    parameter_name=spec.target,
+                )
+            )
+        elif spec.kind == ep.OutputKind.GRADIENT_TO_USER_INPUT:
+            assert spec.target is not None
+            assert isinstance(spec.arg, PyTensorArgument)
+            return OutputSpec.create(
+                gradient_to_user_input=GradientToUserInputSpec(
+                    arg=TensorArgument(name=spec.arg.name),
+                    user_input_name=spec.target,
+                )
+            )
+        else:
+            raise AssertionError(f"Unknown argument kind: {spec}")

+    def serialize_signature(self, sig: ep.ExportGraphSignature) -> GraphSignature:
+        return GraphSignature(
+            input_specs=[self.serialize_input_spec(s) for s in sig.input_specs],
+            output_specs=[self.serialize_output_spec(s) for s in sig.output_specs],
+        )

+    def serialize_argument_spec(self, x: ep.ArgumentSpec) -> Argument:
+        if isinstance(x, PyTensorArgument):
+            return Argument.create(as_tensor=TensorArgument(name=x.name))
+        elif isinstance(x, PySymIntArgument):
+            return Argument.create(as_sym_int=SymIntArgument.create(as_name=x.name))
+        elif isinstance(x, PyConstantArgument):
+            return self.serialize_input(x.value)
+        else:
+            raise AssertionError("TODO")

+    def serialize_module_call_signature(self, module_call_signature: ep.ModuleCallSignature) -> ModuleCallSignature:
+        return ModuleCallSignature(
+            inputs=[self.serialize_argument_spec(x) for x in module_call_signature.inputs],
+            outputs=[self.serialize_argument_spec(x) for x in module_call_signature.outputs],
+            in_spec=treespec_dumps(module_call_signature.in_spec, TREESPEC_VERSION),
+            out_spec=treespec_dumps(module_call_signature.out_spec, TREESPEC_VERSION),
+        )

+    def serialize_module_call_graph(self, module_call_graph: List[ep.ModuleCallEntry]) -> List[ModuleCallEntry]:
+        return [
+            ModuleCallEntry(
+                fqn=entry.fqn,
+                signature=self.serialize_module_call_signature(entry.signature) if entry.signature else None,
+            ) for entry in module_call_graph
+        ]

+    def serialize_outputs(self, node: torch.fx.Node) -> List[Argument]:
+        """For a given node, return the dataclass representing its output values.

+        [NOTE: Multiple outputs] We handle aggregates differently than FX. For
+        FX, it looks like:

+            x = call_function("multiple_return", ...)
+            element0 = call_function(getitem, x, 0)
+            foo = call_function("use_output", element0)

+        We do not want the intermediate `getitem` call, so our serialized thing looks like:

+            element0, element1, element2 = call_function("multiple_return", ...)
+            foo = call_function("use_output", element0)

+        We want names to be consistent across these two schemes, so that we can
+        mostly reuse the names coming from FX. This function computes a mapping from
+        the FX representation to our representation, preserving the names.
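
+        For example, an op with two tensor outputs whose getitem users are
+        named "getitem" and "getitem_1" serializes to a single node whose
+        outputs carry those same names; outputs with no getitem user get a
+        placeholder name of the form "<node>_unused_<idx>".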
+ """ + assert node.op == "call_function" and isinstance(node.target, torch._ops.OpOverload) + + assert isinstance(node.target, torch._ops.OpOverload) + returns = node.target._schema.returns + + if len(returns) == 0: + return [] + + meta_val = node.meta["val"] + + def output_node_at_index(node, index): + for user in node.users: + assert user.target is operator.getitem, f"{user} is not a getitem node" + if index == user.args[1]: + return user + return None + + # Check single value return + if _is_single_tensor_return(node.target): + # e.g "-> Tensor" + return [Argument.create(as_tensor=self.serialize_tensor_output(node.name, meta_val))] + elif len(returns) == 1 and isinstance(meta_val, torch.SymInt): + # e.g "-> SymInt" + return [Argument.create(as_sym_int=self.serialize_sym_int_output(node.name, meta_val))] + elif len(returns) == 1 and isinstance(meta_val, torch.SymBool): + # e.g "-> SymBool" + return [Argument.create(as_sym_bool=self.serialize_sym_bool_output(node.name, meta_val))] + elif _is_single_tensor_list_return(node.target): + # e.g "-> Tensor[]" + tensor_args = [] + for idx, meta in enumerate(meta_val): + user_node = output_node_at_index(node, idx) + name = ( + user_node.name + if user_node is not None + else f"{node.name}_unused_{idx}" + ) + tensor_args.append(self.serialize_tensor_output(name, meta)) + return [Argument.create(as_tensors=tensor_args)] + + # There are a two possibilities at this point: + # - This operator returns a tuple of Tensors, e.g. "-> (Tensor, Tensor)" + # - This operator returns a tuple of mixed of Tensor and Tensors, e.g. "-> (Tensor, Tensor[])" + # + # Either way, start by gathering a list of TensorArguments with the correct names. + # For consistent naming with FX, consult the downstream `getitem` node and + # make sure our outputs have the same name. + + output_arguments = [] + for idx, (meta, return_schema) in enumerate(zip(meta_val, returns)): + if meta is None: + assert isinstance(return_schema.real_type, torch.OptionalType) + output_arguments.append(Argument.create(as_none=())) + elif isinstance(meta, torch._subclasses.fake_tensor.FakeTensor): + assert isinstance(return_schema.real_type, torch.TensorType) + user_node = output_node_at_index(node, idx) + name = ( + user_node.name + if user_node is not None + else f"{node.name}_unused_{idx}" + ) + output_arguments.append( + Argument.create(as_tensor=self.serialize_tensor_output(name, meta)) + ) + elif isinstance(meta, list): + # for List[Tensor] return type + assert isinstance( + return_schema.real_type, torch.ListType + ) and isinstance( + return_schema.real_type.getElementType(), torch.TensorType + ) + user_node = output_node_at_index(node, idx) + assert user_node is not None + + args = [] + for i, m in enumerate(meta): + if m is None: + continue + sub_user_node = output_node_at_index(user_node, i) + assert sub_user_node is not None, f"No user found at index {i}" + + args.append(self.serialize_tensor_output(sub_user_node.name, m)) + output_arguments.append(Argument.create(as_tensors=args)) + + return output_arguments + + def _handle_getitem_users(self, node: torch.fx.Node) -> List[TensorArgument]: + meta_val = node.meta["val"] + + idx_to_name = {} + for user in node.users: + assert user.target is operator.getitem, f"User node {user} of {node} is incorrect" + idx_to_name[user.args[1]] = user.name + + for idx, _ in enumerate(meta_val): + # FX does not emit a getitem node for any outputs that are unused. 
+ # However, we need a name for them so that the number of outputs will + # correctly match the schema. Just assign a dummy name. + if idx not in idx_to_name: + idx_to_name[idx] = f"{node.name}_unused_{idx}" + + arg_list = [] + for i, element_meta_val in enumerate(meta_val): + arg_list.append( + self.serialize_tensor_output(idx_to_name[i], element_meta_val) + ) + + return arg_list + + def serialize_graph(self, graph_module: torch.fx.GraphModule) -> Graph: + assert isinstance(graph_module, torch.fx.GraphModule) + for node in graph_module.graph.nodes: + try: + getattr(self, f"handle_{node.op}")(node) + except Exception as e: + raise SerializeError(f"Failed serializing node {node} in graph: {node.format_node()}") from e + + return Graph( + inputs=self.graph_state.inputs, + nodes=self.graph_state.nodes, + tensor_values=self.graph_state.tensor_values, + sym_int_values=self.graph_state.sym_int_values, + sym_bool_values=self.graph_state.sym_bool_values, + outputs=self.graph_state.outputs, + is_single_tensor_return=self.graph_state.is_single_tensor_return, + ) + + def serialize(self, graph_module: torch.fx.GraphModule) -> GraphModule: + graph = self.serialize_graph(graph_module) + + return GraphModule( + graph=graph, + signature=self.serialize_signature(self.graph_signature), + module_call_graph=self.serialize_module_call_graph(self.module_call_graph), + ) + + +class ExportedProgramSerializer: + def __init__(self, opset_version: Optional[Dict[str, int]] = None): + self.opset_version: Dict[str, int] = {} + if opset_version: + self.opset_version.update(opset_version) + if "aten" not in self.opset_version: + self.opset_version["aten"] = torch._C._get_max_operator_version() + + def serialize(self, exported_program: ep.ExportedProgram) -> SerializedArtifact: + """ + Args: + exported_program: Exported Program to serialize + """ + gm_serializer = GraphModuleSerializer( + exported_program.graph_signature, + exported_program.module_call_graph + ) + serialized_graph_module = gm_serializer.serialize(exported_program.graph_module) + serialized_range_constraints = serialize_range_constraints(exported_program.range_constraints) + + # TODO: Directly serialize exported_program.constants once + # CustomClassHolders get stored in the ExportedProgram rather than in + # the graph + constants = {} + for n, c in gm_serializer.custom_objs.items(): + constants[n] = c + for n, t in exported_program.tensor_constants.items(): + assert n not in constants + constants[n] = t + + return SerializedArtifact( + ExportedProgram( + graph_module=serialized_graph_module, + opset_version=self.opset_version, + range_constraints=serialized_range_constraints, + schema_version=SCHEMA_VERSION, + dialect=exported_program.dialect, + ), + serialize_torch_artifact(exported_program.state_dict), + serialize_torch_artifact(constants), + ) + + +class GraphModuleDeserializer: + @dataclasses.dataclass + class Result: + graph_module: torch.fx.GraphModule + signature: ep.ExportGraphSignature + module_call_graph: List[ep.ModuleCallEntry] + names_to_symbols: Dict[str, sympy.Symbol] + + def __init__(self): + self.serialized_name_to_node: Dict[str, torch.fx.Node] = {} + self.serialized_name_to_meta: Dict[str, MetaType] = {} + self.graph = torch.fx.Graph() + self.module = torch.nn.Module() + + @contextmanager + def save_graph_module(self) -> Iterator[None]: + saved = self.graph, self.module, self.serialized_name_to_node, self.serialized_name_to_meta + self.graph = torch.fx.Graph() + self.module = torch.nn.Module() + self.serialized_name_to_node = {} + 
self.serialized_name_to_meta = {} + try: + yield + finally: + self.graph, self.module, self.serialized_name_to_node, self.serialized_name_to_meta = saved + + def deserialize_operator(self, serialized_target: str): + if serialized_target.startswith("_operator"): # TODO(zhxchen17) Follow up on this. + module = operator + serialized_target_names = serialized_target.split(".")[1:] + elif serialized_target.startswith("torch.ops"): + module = torch.ops + serialized_target_names = serialized_target.split(".")[2:] + else: # TODO(zhxchen17) Don't catch all here. + return serialized_target + + target = module + for name in serialized_target_names: + if not hasattr(target, name): + return serialized_target + else: + target = getattr(target, name) + return target + + def deserialize_sym_int(self, s: SymInt) -> Union[int, torch.SymInt]: + val = s.value + if s.type == "as_expr": + if val.expr_str in self.symbol_name_to_symbol: + sym = self.symbol_name_to_symbol[val.expr_str] + else: + sym = sympy.sympify(val.expr_str, locals=self.symbol_name_to_symbol) + if isinstance(sym, sympy.Symbol): + self.symbol_name_to_symbol[val.expr_str] = sym + + if vr := self.symbol_name_to_range.get(val.expr_str): + symbolic_shapes._constrain_symbol_range( + self.shape_env, + sym, + compiler_min=vr.lower, # type: ignore[arg-type] + compiler_max=vr.upper, # type: ignore[arg-type] + runtime_min=vr.lower, # type: ignore[arg-type] + runtime_max=vr.upper # type: ignore[arg-type] + ) + + if val.hint is None: + hint = None + else: + assert val.hint.type == "as_int" + hint = val.hint.value + + return self.shape_env.create_symintnode(sym, hint=hint) + elif s.type == "as_int": + assert isinstance(val, int) + return val + else: + raise SerializeError( + f"SymInt has invalid field type {s.type} with value {s.value}" + ) + + def deserialize_sym_bool(self, s: SymBool) -> Union[bool, torch.SymBool]: + val = s.value + if s.type == "as_expr": + expr = sympy.sympify(val.expr_str, locals=self.symbol_name_to_symbol) + return self.shape_env.create_symboolnode(expr) + elif s.type == "as_bool": + assert isinstance(val, bool) + return val + else: + raise SerializeError( + f"SymBool has invalid field type {s.type} with value {s.value}" + ) + + def deserialize_tensor_meta( + self, + tensor_meta: TensorMeta, + fake_tensor_mode: FakeTensorMode, + ) -> FakeTensor: + with fake_tensor_mode: + return cast( + FakeTensor, + torch.empty_strided( + tuple(self.deserialize_sym_int(val) for val in tensor_meta.sizes), # type: ignore[misc] + tuple(self.deserialize_sym_int(val) for val in tensor_meta.strides), # type: ignore[misc] + device=deserialize_device(tensor_meta.device), + dtype=_SERIALIZE_TO_TORCH_DTYPE[tensor_meta.dtype], + ), + ) + + def deserialize_graph_output(self, output) -> torch.fx.Node: + if isinstance(output.value, TensorArgument): + return self.serialized_name_to_node[output.value.name] + elif isinstance(output.value, (SymIntArgument, SymBoolArgument)): + return self.serialized_name_to_node[output.value.as_name] + else: + raise SerializeError(f"Unable to deserialize output node {output}") + + def deserialize_graph(self, serialized_graph: Graph) -> torch.fx.Graph: + # Handle the tensor metas. 
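+        # Each serialized TensorMeta below is rehydrated into a FakeTensor on
+        # this deserializer's shape env (e.g. sizes like [s0, 4] are rebuilt
+        # as symbolic), so later nodes can look up their "val" metadata by name.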
+ for name, tensor_value in serialized_graph.tensor_values.items(): + meta_val = self.deserialize_tensor_meta(tensor_value, self.fake_tensor_mode) + self.serialized_name_to_meta[name] = meta_val + + for name, sym_int_value in serialized_graph.sym_int_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_sym_int(sym_int_value) + + for name, sym_bool_value in serialized_graph.sym_bool_values.items(): + self.serialized_name_to_meta[name] = self.deserialize_sym_bool(sym_bool_value) + + # Inputs: convert to placeholder nodes in FX. + for input in serialized_graph.inputs: + placeholder_node = self.graph.placeholder(input.as_tensor.name) + self.sync_fx_node(input.as_tensor.name, placeholder_node) + + # Nodes: convert to call_function nodes. + for serialized_node in serialized_graph.nodes: + try: + target = self.deserialize_operator(serialized_node.target) + self.deserialize_node(serialized_node, target) + + except Exception as e: + raise SerializeError(f"Failed deserializing node {serialized_node}") from e + + # Outputs: convert to a single `output` node. + outputs = [] + for output in serialized_graph.outputs: + outputs.append(self.deserialize_graph_output(output)) + + if serialized_graph.is_single_tensor_return: + assert len(outputs) == 1 + outputs = outputs[0] # type: ignore[assignment] + else: + outputs = tuple(outputs) # type: ignore[assignment] + + output_node = self.graph.output(outputs) + + if serialized_graph.is_single_tensor_return: + output_node.meta["val"] = output_node.args[0].meta["val"] + else: + output_node.meta["val"] = tuple( + arg.meta["val"] for arg in output_node.args[0] + ) + + return self.graph + + def deserialize_node(self, serialized_node: Node, target: Callable) -> None: + if target.__module__ == "_operator": # TODO(zhxchen17) Follow up on this. + name = serialized_node.outputs[0].value.as_name + args = self.deserialize_sym_op_inputs(serialized_node.inputs) + + fx_node = self.graph.create_node("call_function", target, args, {}, name) + self.deserialize_sym_op_outputs(serialized_node, fx_node) + elif isinstance(target, torch._ops.HigherOrderOperator): + assert ( + len(serialized_node.outputs) == 1 + and serialized_node.outputs[0].type in ("as_tensors", "as_tensor") + ), "Only single tensor output or list of tensor output is supported for higher order operators." + + output = serialized_node.outputs[0] + + name = ( + output.value.name + if output.type == "as_tensor" + else None # FX will generate a name for us. + ) + args = tuple(self.deserialize_input(input.arg) for input in serialized_node.inputs) + fx_node = self.graph.create_node("call_function", target, args, {}, name) + + if output.type == "as_tensor": + self.sync_fx_node(name, fx_node) + if output.type == "as_tensors": + self.deserialize_multiple_outputs(serialized_node, fx_node) + + elif isinstance(target, torch._ops.OpOverload): + # For convenience: if this node returns a single tensor, name the + # newly-created node after it. This ensures that these tensor values + # have names that are consistent with serialized. + name = ( + serialized_node.outputs[0].value.name + if _is_single_tensor_return(target) + else None # FX will generate a name for us. 
+            )
+            args, kwargs = self.deserialize_inputs(target, serialized_node)
+            fx_node = self.graph.create_node("call_function", target, args, kwargs, name)
+            self.deserialize_outputs(serialized_node, fx_node)
+        else:
+            raise SerializeError(f"Unsupported target type for node {serialized_node}: {target}")

+        fx_node.meta.update(self.deserialize_metadata(serialized_node.metadata))

+    def deserialize_input_spec(self, i: InputSpec) -> ep.InputSpec:
+        if i.user_input is not None:
+            return ep.InputSpec(
+                kind=ep.InputKind.USER_INPUT,
+                arg=self.deserialize_argument_spec(i.user_input.arg),
+                target=None
+            )
+        elif i.parameter is not None:
+            return ep.InputSpec(
+                kind=ep.InputKind.PARAMETER,
+                arg=PyTensorArgument(name=i.parameter.arg.name),
+                target=i.parameter.parameter_name,
+            )
+        elif i.buffer is not None:
+            return ep.InputSpec(
+                kind=ep.InputKind.BUFFER,
+                arg=PyTensorArgument(name=i.buffer.arg.name),
+                target=i.buffer.buffer_name,
+            )
+        elif i.tensor_constant is not None:
+            return ep.InputSpec(
+                kind=ep.InputKind.CONSTANT_TENSOR,
+                arg=PyTensorArgument(name=i.tensor_constant.arg.name),
+                target=i.tensor_constant.tensor_constant_name,
+            )
+        else:
+            raise AssertionError(f"Unknown input spec {i}")

+    def deserialize_output_spec(self, o: OutputSpec) -> ep.OutputSpec:
+        if o.user_output is not None:
+            return ep.OutputSpec(
+                kind=ep.OutputKind.USER_OUTPUT,
+                arg=self.deserialize_argument_spec(o.user_output.arg),
+                target=None,
+            )
+        elif o.loss_output is not None:
+            return ep.OutputSpec(
+                kind=ep.OutputKind.LOSS_OUTPUT,
+                arg=PyTensorArgument(name=o.loss_output.arg.name),
+                target=None,
+            )
+        elif o.buffer_mutation is not None:
+            return ep.OutputSpec(
+                kind=ep.OutputKind.BUFFER_MUTATION,
+                arg=PyTensorArgument(name=o.buffer_mutation.arg.name),
+                target=o.buffer_mutation.buffer_name
+            )
+        elif o.gradient_to_parameter is not None:
+            return ep.OutputSpec(
+                kind=ep.OutputKind.GRADIENT_TO_PARAMETER,
+                arg=PyTensorArgument(name=o.gradient_to_parameter.arg.name),
+                target=o.gradient_to_parameter.parameter_name
+            )
+        elif o.gradient_to_user_input is not None:
+            return ep.OutputSpec(
+                kind=ep.OutputKind.GRADIENT_TO_USER_INPUT,
+                arg=PyTensorArgument(name=o.gradient_to_user_input.arg.name),
+                target=o.gradient_to_user_input.user_input_name
+            )
+        else:
+            raise AssertionError(f"Unknown output spec {o}")

+    def deserialize_signature(self, sig: GraphSignature) -> ep.ExportGraphSignature:
+        return ep.ExportGraphSignature(
+            input_specs=[self.deserialize_input_spec(i) for i in sig.input_specs],
+            output_specs=[self.deserialize_output_spec(o) for o in sig.output_specs]
+        )

+    def deserialize(
+        self,
+        serialized_graph_module: GraphModule,
+        symbol_name_to_range: Optional[Dict[str, symbolic_shapes.ValueRanges]] = None,
+        constants: Optional[Dict[str, Any]] = None,
+    ) -> Result:
+        self.shape_env = symbolic_shapes.ShapeEnv(assume_static_by_default=True)
+        self.fake_tensor_mode = FakeTensorMode(
+            allow_fallback_kernels=False,
+            allow_non_fake_inputs=True,
+            shape_env=self.shape_env,
+        )
+        self.symbol_name_to_symbol: Dict[str, sympy.Symbol] = {}
+        self.symbol_name_to_range = {} if symbol_name_to_range is None else symbol_name_to_range
+        self.constants = {} if constants is None else constants

+        self.deserialize_graph(serialized_graph_module.graph)

+        sig = self.deserialize_signature(serialized_graph_module.signature)
+        module_call_graph = self.deserialize_module_call_graph(serialized_graph_module.module_call_graph)
+        return GraphModuleDeserializer.Result(
+            graph_module=torch._export.exported_program._create_graph_module_for_export(self.module, self.graph),
+            signature=sig,
+            module_call_graph=module_call_graph,
+            names_to_symbols=self.symbol_name_to_symbol,
+        )
+
+    def sync_fx_node(self, name: str, fx_node: torch.fx.Node):
+        if name in self.serialized_name_to_node:
+            raise SerializeError(f"Node {name} has already been deserialized.")
+        self.serialized_name_to_node[name] = fx_node
+        assert "val" not in fx_node.meta
+        fx_node.meta["val"] = self.serialized_name_to_meta[name]
+
+    def deserialize_sym_op_inputs(self, inputs):
+        return tuple(self.deserialize_input(input.arg) for input in inputs)
+
+    def deserialize_inputs(self, target: torch._ops.OpOverload, serialized_node: Node):
+        schema_args = target._schema.arguments
+        actual_args = {
+            input.name: self.deserialize_input(input.arg) for input in serialized_node.inputs
+        }
+        args = []
+        kwargs = {}
+        for schema_arg in schema_args:
+            is_positional = not schema_arg.has_default_value() and not schema_arg.kwarg_only
+            if is_positional:
+                args.append(actual_args[schema_arg.name])
+            else:
+                if schema_arg.name in actual_args:
+                    kwargs[schema_arg.name] = actual_args[schema_arg.name]
+        return tuple(args), kwargs
+
+    def deserialize_input(self, inp: Argument) -> Any:
+        value = inp.value
+        typ_ = inp.type
+        if typ_ == "as_none":
+            # None is encoded as a bool in the serialized format; convert it back
+            # to a real None here.
+            return None
+        elif typ_ == "as_scalar_type":
+            return _SERIALIZE_TO_TORCH_DTYPE[value]
+        elif typ_ == "as_memory_format":
+            return _SERIALIZE_TO_TORCH_MEMORY_FORMAT[value]
+        elif typ_ == "as_layout":
+            return _SERIALIZE_TO_TORCH_LAYOUT[value]
+        elif typ_ == "as_graph":
+            assert isinstance(value, GraphArgument)
+            with self.save_graph_module():
+                self.deserialize_graph(value.graph)
+                submodule = torch._export.exported_program._create_graph_module_for_export(self.module, self.graph)
+            self.module.register_module(value.name, submodule)
+            return self.graph.create_node(
+                "get_attr",
+                value.name,
+                name=value.name,
+            )
+        elif isinstance(value, Device):
+            return deserialize_device(value)
+        elif isinstance(value, TensorArgument):
+            return self.serialized_name_to_node[value.name]
+        elif isinstance(value, (int, float, bool)):
+            return value
+        elif isinstance(value, str):
+            return str(value)
+        elif isinstance(value, (SymIntArgument, SymBoolArgument)):
+            return self.deserialize_sym_argument(value)
+        elif isinstance(value, list):
+            if len(value) == 0:
+                return []
+            elif isinstance(value[0], TensorArgument):
+                result = []
+                for arg in value:
+                    result.append(self.serialized_name_to_node[arg.name])
+                return result
+            elif isinstance(value[0], (int, float, bool)):
+                # convert from serialized.python.types.List to python list
+                return list(value)
+            elif isinstance(value[0], (SymIntArgument, SymBoolArgument)):
+                return [self.deserialize_sym_argument(arg) for arg in value]
+            elif isinstance(value[0], OptionalTensorArgument):
+                def deserialize_optional_tensor_args(a):
+                    if a.type == "as_none":
+                        return None
+                    elif a.type == "as_tensor":
+                        return self.serialized_name_to_node[a.value]
+                    else:
+                        raise SerializeError(f"Unhandled argument {inp}")
+                return list(map(deserialize_optional_tensor_args, value))
+            else:
+                raise SerializeError(f"Unhandled argument {inp}")
+        elif isinstance(value, CustomObjArgument):
+            return self.constants[value.name]
+        else:
+            raise SerializeError(f"Unhandled argument {inp}")
+
+    def deserialize_sym_argument(self, sym_int_arg):
+        if sym_int_arg.type == 
"as_int": + return sym_int_arg.as_int + else: + assert sym_int_arg.type == "as_name" + return self.serialized_name_to_node[sym_int_arg.as_name] + + def deserialize_sym_op_outputs(self, serialized_node: Node, fx_node: torch.fx.Node): + self.sync_fx_node(serialized_node.outputs[0].value.as_name, fx_node) + + def deserialize_outputs(self, serialized_node: Node, fx_node: torch.fx.Node): + # Simple case for single tensor return. + assert isinstance(fx_node.target, torch._ops.OpOverload) + returns = fx_node.target._schema.returns + + # Check single value return + if len(returns) == 0: + return + if _is_single_tensor_return(fx_node.target): + self.sync_fx_node(serialized_node.outputs[0].as_tensor.name, fx_node) + return + elif len(returns) == 1 and isinstance(serialized_node.outputs[0].value, (SymIntArgument, SymBoolArgument)): + self.sync_fx_node(serialized_node.outputs[0].value.as_name, fx_node) + return + + self.deserialize_multiple_outputs(serialized_node, fx_node) + + def deserialize_multiple_outputs(self, serialized_node: Node, fx_node: torch.fx.Node) -> None: + deserialized_metadata = self.deserialize_metadata(serialized_node.metadata) + + def generate_getitem(meta_val, fx_node: torch.fx.Node, arg: TensorArgument, idx: int): + name = arg.name + individual_output = self.graph.create_node( + "call_function", + operator.getitem, + (fx_node, idx), + name=name, + ) + self.sync_fx_node(name, individual_output) + meta_val.append(self.serialized_name_to_meta[name]) + # The derived `getitem` nodes should have the same stacktrace as the + # original `fx_node` + individual_output.meta.update(deserialized_metadata) + + def generate_getitems(meta_val, fx_node: torch.fx.Node, args): + for idx, arg in enumerate(args): + if isinstance(arg, Argument): + arg = arg.value + if isinstance(arg, TensorArgument): + generate_getitem(meta_val, fx_node, arg, idx) + elif isinstance(arg, (list, tuple)): + list_output = self.graph.create_node( + "call_function", + operator.getitem, + (fx_node, idx), + ) + meta_val.append([]) + generate_getitems(meta_val[-1], list_output, arg) + list_output.meta.update(deserialized_metadata) + list_output.meta['val'] = meta_val[-1] + else: + raise NotImplementedError(f"Unimplemented node output type: {arg}") + + # Convert multiple return types to FX format. + # In FX, each node only returns one value. So in order to represent + # multiple return values, we have to emit a `getitem` node for each + # return value. 
+ # This performs the inverse mapping of the `serialize_outputs` call in + # serialization, see [NOTE: Multiple outputs] + meta_val: List[Any] = [] + if len(serialized_node.outputs) == 1: + assert isinstance(serialized_node.outputs[0].value, list) + assert isinstance(serialized_node.outputs[0].value[0], TensorArgument) + generate_getitems(meta_val, fx_node, serialized_node.outputs[0].as_tensors) + else: + generate_getitems(meta_val, fx_node, serialized_node.outputs) + + # also update the metaval for `fx_node` to be a list(meta) + fx_node.meta["val"] = tuple(meta_val) + self.serialized_name_to_node[fx_node.name] = fx_node + + def deserialize_metadata(self, metadata: Dict[str, str]) -> Dict[str, Any]: + ret: Dict[str, Any] = {} + if stack_trace := metadata.get("stack_trace"): + ret["stack_trace"] = stack_trace + + def deserialize_meta_func(serialized_target: str): + module = None + if serialized_target.startswith("torch.nn"): + module = torch.nn + serialized_target_names = serialized_target.split(".")[2:] + elif serialized_target.startswith("torch"): + module = torch + serialized_target_names = serialized_target.split(".")[1:] + else: + return self.deserialize_operator(serialized_target) + + target = module + for name in serialized_target_names: + if not hasattr(target, name): + return serialized_target + else: + target = getattr(target, name) + return target + + if nn_module_stack_str := metadata.get("nn_module_stack"): + # Originally serialized to "key,orig_path,type_str" + def import_nn_module_stack(key, path, ty): + return key, (path, ty) + nn_module_stack = dict( + import_nn_module_stack(*item.split(",")) + for item in nn_module_stack_str.split(ST_DELIMITER) + ) + ret["nn_module_stack"] = nn_module_stack + + if source_fn_st_str := metadata.get("source_fn_stack"): + # Originally serializes to "fx_node_name,op_str" + source_fn_st = [] + for source_fn_str in source_fn_st_str.split(ST_DELIMITER): + name, target_str = source_fn_str.split(",") + source_fn_st.append((name, deserialize_meta_func(target_str))) + ret["source_fn_stack"] = source_fn_st + return ret + + def deserialize_argument_spec(self, x: Argument) -> ep.ArgumentSpec: + if x.as_tensor is not None: + return PyTensorArgument(name=x.as_tensor.name) + elif x.as_sym_int is not None: + return PySymIntArgument(name=x.as_sym_int.as_name) + else: + return PyConstantArgument(value=self.deserialize_input(x)) + + def deserialize_module_call_signature(self, module_call_signature: ModuleCallSignature) -> ep.ModuleCallSignature: + return ep.ModuleCallSignature( + inputs=[self.deserialize_argument_spec(x) for x in module_call_signature.inputs], + outputs=[self.deserialize_argument_spec(x) for x in module_call_signature.outputs], + in_spec=treespec_loads(module_call_signature.in_spec), + out_spec=treespec_loads(module_call_signature.out_spec), + ) + + def deserialize_module_call_graph(self, module_call_graph: List[ModuleCallEntry]) -> List[ep.ModuleCallEntry]: + return [ + ep.ModuleCallEntry( + fqn=entry.fqn, + signature=self.deserialize_module_call_signature(entry.signature) if entry.signature else None, + ) for entry in module_call_graph + ] + + +class ExportedProgramDeserializer: + def __init__(self, expected_opset_version: Optional[Dict[str, int]] = None): + self.expected_opset_version: Dict[str, int] = {} + if expected_opset_version: + self.expected_opset_version.update(expected_opset_version) + if "aten" not in self.expected_opset_version: + self.expected_opset_version["aten"] = torch._C._get_max_operator_version() + + def 
deserialize_range_constraints( + self, + symbol_name_to_range: Dict[str, symbolic_shapes.ValueRanges], + symbol_name_to_symbol: Dict[str, sympy.Symbol], + ) -> Dict[sympy.Symbol, ValueRanges]: + range_constraints = {} + for k, v in symbol_name_to_range.items(): + if symbol := symbol_name_to_symbol.get(k): + range_constraints[symbol] = v # type: ignore[arg-type] + else: + log.warning(f"Symbol {k} did not appear in the graph that was deserialized") # noqa: G004 + return range_constraints + + def deserialize( + self, serialized_artifact: SerializedArtifact + ) -> ep.ExportedProgram: + assert isinstance(serialized_artifact.exported_program, ExportedProgram) + + if serialized_artifact.exported_program.schema_version != SCHEMA_VERSION: + raise SerializeError( + f"Serialized schema version {serialized_artifact.exported_program.schema_version} " + f"does not match our current schema version {SCHEMA_VERSION}." + ) + + symbol_name_to_range = { + k: symbolic_shapes.ValueRanges(_int_to_sympy_int(v.min_val), _int_to_sympy_int(v.max_val)) + for k, v in serialized_artifact.exported_program.range_constraints.items() + } + constants = deserialize_torch_artifact(serialized_artifact.constants) + + # TODO: No need to do this once CustomClassHolders are lifted to the ExportedProgram + tensor_constants = { + k: v for k, v in constants.items() if isinstance(v, torch.Tensor) + } + + res = ( + GraphModuleDeserializer() + .deserialize( + serialized_artifact.exported_program.graph_module, + symbol_name_to_range, + constants, + ) + ) + range_constraints = self.deserialize_range_constraints( + symbol_name_to_range, res.names_to_symbols, + ) + model_opset_version: Optional[Dict[str, int]] = serialized_artifact.exported_program.opset_version + self._validate_model_opset_version(model_opset_version) + + upgrader = GraphModuleOpUpgrader(self.expected_opset_version, model_opset_version) + + state_dict = deserialize_torch_artifact(serialized_artifact.state_dict) + + exported_program = ep.ExportedProgram( + res.graph_module, + res.graph_module.graph, + res.signature, + state_dict, # type: ignore[arg-type] + range_constraints, + [], + res.module_call_graph, + None, + load_verifier(serialized_artifact.exported_program.dialect), + tensor_constants=tensor_constants, + ) + return upgrader.upgrade(exported_program) + + def _validate_model_opset_version(self, model_opset_version: Optional[Dict[str, int]]): + """Compare model_opset_version with expected_opset_version and raise error if we can't resolve the version + difference. + E.g., model_opset_version = {"aten": 3, "custom": 4} + expected_opset_version = {"aten": 4, "custom": 4} + This means we can use an upgrader for ATen to reconcile the deserialized model. + + The logic of this method: + + For common op namespaces: + 1. if model version < expected version, this case can be handled by upgraders. + 2. if model version > expected version, we need downgraders but not implemented yet. + 3. if model version == expected version, we don't need extra handling. + + For op namespace only in model_opset_version, we should give a warning because it is missing from + expected_opset_version. 
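+
+        Worked illustration (hypothetical version numbers): with
+        model_opset_version = {"aten": 3, "custom2": 5} and expected_opset_version = {"aten": 4},
+        "aten" is a common namespace whose versions differ (case 1), while "custom2"
+        only triggers the missing-namespace warning.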
+ """ + if not model_opset_version: + raise RuntimeError("Serialized model should have opset version.") + common_namespaces = {key for key in model_opset_version if key in self.expected_opset_version} + for namespace in common_namespaces: + assert ( + isinstance(model_version := model_opset_version[namespace], int) + ), f"model_opset_version value should be int, got {model_opset_version[namespace]}" + + assert ( + isinstance(compiler_version := self.expected_opset_version[namespace], int) + ), f"expected_opset_version value should be int, got {self.expected_opset_version[namespace]}" + + # TODO(larryliu0820): Add support for upgrader & downgrader + if model_version != compiler_version: + raise NotImplementedError( + f"Model opset version {model_opset_version} doesn't match to compiler opset version " + f"{self.expected_opset_version}! Upgrader/downgrader is not implemented yet." + ) + for namespace in model_opset_version: + if namespace in common_namespaces: + continue + log.warning("Compiler doesn't have a version table for op namespace: {ns}. ", extra={"ns": namespace}) + + +class EnumEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, bytes): + return base64.b64encode(obj).decode('utf-8') + return super().default(obj) + + +def serialize( + exported_program: ep.ExportedProgram, + opset_version: Optional[Dict[str, int]] = None, +) -> SerializedArtifact: + exported_program._validate() + serialized_artifact = ( + ExportedProgramSerializer(opset_version).serialize(exported_program) + ) + assert isinstance(serialized_artifact.exported_program, ExportedProgram) + json_program = json.dumps( + dataclasses.asdict(serialized_artifact.exported_program), cls=EnumEncoder + ) + json_bytes = json_program.encode('utf-8') + artifact = SerializedArtifact( + json_bytes, + serialized_artifact.state_dict, + serialized_artifact.constants + ) + return artifact + + +def _dict_to_dataclass(cls, data): + assert not isinstance(cls, str), f"Unresolved class type: '{cls}'." 
+ if typing.get_origin(cls) == typing.Union and type(None) in typing.get_args(cls): + if data is None: + return None + ty_args = typing.get_args(cls) + assert len(ty_args) == 2 + return _dict_to_dataclass(ty_args[0], data) + elif isinstance(cls, type) and issubclass(cls, _Union): + obj = cls(**data) + field_type = cls.__annotations__[obj.type] + setattr(obj, obj.type, _dict_to_dataclass(field_type, obj.value)) + return obj + elif dataclasses.is_dataclass(cls): + obj = cls(**data) # type: ignore[assignment] + type_hints = typing.get_type_hints(cls) + for f in dataclasses.fields(cls): + name = f.name + new_field_obj = _dict_to_dataclass(type_hints[name], getattr(obj, name)) + setattr(obj, name, new_field_obj) + return obj + elif isinstance(data, list): + if len(data) == 0: + return data + d_type = typing.get_args(cls)[0] + return [ + _dict_to_dataclass(d_type, d) + for d in data + ] + elif isinstance(data, dict): + v_type = typing.get_args(cls)[1] + return { + k: _dict_to_dataclass(v_type, v) + for k, v in data.items() + } + return data + + +def deserialize( + artifact: SerializedArtifact, + expected_opset_version: Optional[Dict[str, int]] = None, +) -> ep.ExportedProgram: + assert isinstance(artifact.exported_program, bytes) + exported_program_str = artifact.exported_program.decode('utf-8') + exported_program_dict = json.loads(exported_program_str) + serialized_exported_program = _dict_to_dataclass(ExportedProgram, exported_program_dict) + return ( + ExportedProgramDeserializer(expected_opset_version) + .deserialize( + SerializedArtifact( + serialized_exported_program, + artifact.state_dict, + artifact.constants + ) + ) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/upgrade.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/upgrade.py new file mode 100644 index 0000000000000000000000000000000000000000..d074c2ec9b4ed7da62a0820550b42bdf3db37d8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/serde/upgrade.py @@ -0,0 +1,201 @@ +import logging +from collections import defaultdict +from typing import Tuple, Dict, Optional, List + +import torch +from torch._export import export +from torch._export.pass_base import _ExportPassBase +from torch._export.pass_infra.node_metadata import NodeMetadata +from torch._export.pass_infra.proxy_value import ProxyValue +from torch._subclasses import FakeTensor +from torch.fx.node import Target, Argument +from torch.library import Library +from torch.utils._pytree import tree_unflatten +import torch._export.exported_program as ep +import re + +lib = Library("aten", "FRAGMENT") +impl_lib = Library("aten", "IMPL") + +log = logging.getLogger(__name__) + + +def get_target_version(versioned_upgrader_name: str) -> int: + """div_Scalar_0_3 is the name of the upgrader, meaning it applies to div.Scalar of version 0 to 3 and is + upgrading to version 4.""" + if not re.match("^.*_[0-9]+_[0-9]+$", versioned_upgrader_name): + raise RuntimeError(f"Upgrader name {versioned_upgrader_name} is invalid") + + return int(versioned_upgrader_name.split('_')[-1]) + 1 + + +def get_upgraders() -> Dict[str, Tuple[str, str]]: + """Getting upgraders entry map and operator version map and merge them into one dict.""" + upgraders = torch._C._get_upgraders_entry_map() + op_version_map = torch._C._get_operator_version_map() + output: Dict[str, Tuple[str, str]] = defaultdict(tuple) # type: ignore[arg-type] + for opname, entry_list in op_version_map.items(): + if not entry_list: + raise RuntimeError(f"Op version map has an 
empty entry for opname {opname}") + entry = entry_list[0] + old_schema = entry.old_schema + upgrader_name = entry.upgrader_name + upgrader_str = upgraders.get(upgrader_name, None) + if not upgrader_str: + raise RuntimeError(f"Can't find upgrader for op {opname} and upgrader name {upgrader_name}") + output[upgrader_name] = (old_schema, upgrader_str) + return output + + +class GraphModuleOpUpgrader: + """This upgrader is able to upgrade the old version of ops in a given GraphModule, if all upgraders are available. + To use it, retrieve upgraders from somewhere (TorchScript API or new API) and pass it into this upgrader. In + __init__() it does the following: + 1. parse the upgrader list and reorder for upgrading purpose. + 2. register old versions of operators as custom ops. + 3. prepare upgrader passes. + + In `upgrade()` API run these upgrader passes. + + An example of op_upgraders input: + { + "aten::div__Scalar_0_3": ( # versioned op name + "div._Scalar(self: Tensor, other: Scalar)", # old schema + ''' + def div__Scalar_0_3(self: torch.Tensor, other) -> torch.Tensor: # upgrader in literal string + if (self.is_floating_point() or isinstance(other, float)): + return self.true_divide_(other) + return self.divide_(other, rounding_mode='trunc') + ''', + ), + }, + + Note that we require the upgrader function to be runnable in Python (which is a stricter requirement than the + original TorchScript upgrader). + """ + + class UpgraderPass(_ExportPassBase): + def __init__(self, old_target: Target, new_target: Target): + super().__init__() + self.old_target = old_target + self.new_target = new_target + + def call_operator( + self, + op, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + meta: NodeMetadata, + ) -> ProxyValue: + if op == self.old_target: + return super().call_operator(self.new_target, args, kwargs, meta) + return super().call_operator(op, args, kwargs, meta) + + def __init__( + self, + compiler_opset_version: Optional[Dict[str, int]] = None, + model_opset_version: Optional[Dict[str, int]] = None, + op_upgraders: Optional[Dict[str, Tuple[str, str]]] = None, + ): + self.op_upgraders: Dict[str, Tuple[str, str]] = get_upgraders() if not op_upgraders else op_upgraders + self.compiler_opset_version = compiler_opset_version if compiler_opset_version else {} + self.model_opset_version = model_opset_version if model_opset_version else {} + self.upgrader_passes: List[GraphModuleOpUpgrader.UpgraderPass] = GraphModuleOpUpgrader._populate_passes( + self._parse_upgraders(self.op_upgraders)) + + def _parse_upgraders(self, op_upgraders: Optional[Dict[str, Tuple[str, str]]] = None) -> List[Tuple[str, str]]: + """Reorder op_upgraders by version number, return an ordered list of tuples, containing old op schema as well + as the upgrader function string literal.""" + # TODO(larryliu0820): Add support for custom ops + op_namespace = "aten" + if not op_upgraders or op_namespace not in self.model_opset_version or op_namespace not in self.compiler_opset_version: + return [] + model_ver = self.model_opset_version[op_namespace] + curr_ver = self.compiler_opset_version[op_namespace] + + # key is the target version. div__Scalar_0_3 should have a key of 4. 
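+        # Illustration (hypothetical versions): with model_ver = 3, curr_ver = 5 and
+        # upgraders named div__Scalar_0_3 and gelu_0_4, versioned_upgraders maps
+        # {4: <div upgrader>, 5: <gelu upgrader>}, and the loop below collects the
+        # entries for versions 4 and 5, in that order.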
+        versioned_upgraders: Dict[int, Tuple[str, str]] = {get_target_version(name): v for name, v in
+                                                           op_upgraders.items()}
+        target_upgraders: List[Tuple[str, str]] = []
+        # we need all upgraders from model_ver + 1 to curr_ver, inclusive
+        for ver in range(model_ver + 1, curr_ver + 1):
+            if ver in versioned_upgraders:
+                target_upgraders.append(versioned_upgraders[ver])
+            else:
+                # We may be able to get away with a missing upgrader if that operator
+                # is absent from the given graph module.
+                log.warning("Missing an upgrader to upgrade to version {ver}.", extra={"ver": ver})
+
+        return target_upgraders
+
+    @staticmethod
+    def _populate_passes(upgraders: List[Tuple[str, str]]) -> List[UpgraderPass]:
+        """Given a list of upgraders, loop through it from lower version to higher version and create passes for all
+        upgraders. Use the torch.Library API to register old ops. The op name will be
+        <name>_<valid_from_version>_<valid_till_version>. Register upgraders as CompositeImplicitAutograd kernels.
+        For example:
+
+            lib = Library("aten", "FRAGMENT")
+            lib.define(old_schema)
+
+            impl_lib = Library("aten", "IMPL")
+            impl_lib.impl("div__Scalar_0_3", div__Scalar_0_3, "CompositeImplicitAutograd")
+
+        @:var upgraders: a list of tuples. The first element of the tuple is the old schema and the second is the
+        upgrader function literal text.
+        @:return upgrader passes, order matters
+        """
+
+        upgrader_passes = []
+
+        def register_old_op(name: str, schema: str, impl_str: str):
+            """Registers an old version operator using impl_name as old op name."""
+            lib.define(schema)
+            try:
+                exec(impl_str)
+            except Exception as e:
+                raise RuntimeError(f"Invalid upgrader string: {impl_str}") from e
+            impl_lib.impl(name, locals()[name], "CompositeImplicitAutograd")
+
+        for (schema, upgrader_str) in upgraders:
+            upgrader_name = upgrader_str.split('(')[0].split(' ')[-1]
+            op_name = schema.split('(')[0].split("::")[-1]
+            schema = schema.replace(op_name, upgrader_name)
+            try:
+                register_old_op(name=upgrader_name, schema=schema, impl_str=upgrader_str)
+            except RuntimeError as e:
+                if "with the same name and overload name multiple times" in str(e):
+                    print(f"Registering {upgrader_name} multiple times")
+                else:
+                    raise RuntimeError from e
+            old_op_target = getattr(torch.ops.aten, upgrader_name).default
+            # For example, the operator instance of "aten::div" is torch.ops.aten.div.default;
+            # we need to append "default" at the end.
+            op_name, overload_name = (op_name, "default") if "." not in op_name else tuple(op_name.split(".")[:2])
+            new_op_target = getattr(getattr(torch.ops.aten, op_name), overload_name)
+            # Note that the graph carries the new op names, but the nodes are actually
+            # of the old versions.
+            upgrader_passes.append(
+                GraphModuleOpUpgrader.UpgraderPass(old_target=new_op_target, new_target=old_op_target))
+
+        return upgrader_passes
+
+    def upgrade(self, exported_program: ep.ExportedProgram) -> ep.ExportedProgram:
+        """Run each upgrader pass and then retrace to decompose it. Each upgrader pass replaces the old version of
+        operators with a custom operator. The custom operator contains a CompositeImplicitAutograd kernel (the
+        upgrading function itself). After retrace, this custom operator will be decomposed into the ops used in the
+        upgrader. 
After all passes are applied, the exported program will be upgraded to the target version.""" + if not self.upgrader_passes: + return exported_program + + args = [n.meta.get("val", None) for n in exported_program.graph.nodes if n.op == "placeholder"] + args_real_tensors = [torch.ones(tuple(arg.size()), dtype=arg.dtype) if isinstance(arg, FakeTensor) else arg for + arg in args] + assert exported_program.call_spec.in_spec is not None + args, kwargs = tree_unflatten(args_real_tensors, exported_program.call_spec.in_spec) + assert kwargs == {} + + for _pass in self.upgrader_passes: + upgraded_program = exported_program._transform(_pass) + # NB: we have to retrace the graph_module instead of ep because of some failure. + exported_program = export(upgraded_program.module(), args, kwargs) + + return exported_program diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..455107d19e2f1b033bb488afed38d85f3d0e0aad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/utils.py @@ -0,0 +1,180 @@ +import dataclasses + +from typing import Any, Dict, Iterable, List, Optional, Tuple, Type + +import torch + +from torch._export import ExportedProgram + +from torch.utils._pytree import ( + _register_pytree_node, + Context, + DumpableContext, + FlattenFunc, + FromDumpableContextFn, + ToDumpableContextFn, + tree_flatten, + UnflattenFunc, +) + + +SERIALIZED_DATACLASS_TO_PYTHON_DATACLASS: Dict[str, Type[Any]] = {} + + +@torch._dynamo.disable +def _check_input_constraints_pre_hook(self, *args, **kwargs): + flat_args, _ = tree_flatten(args) + return _check_input_constraints_for_graph( + self.graph, + range_constraints=self.range_constraints, + equality_constraints=self.equality_constraints, + )(*flat_args) + + +def _check_input_constraints_for_graph( + graph: torch.fx.Graph, range_constraints, equality_constraints +): + from torch._export.passes.add_runtime_assertions_for_constraints_pass import ( + _AddRuntimeAssertionsForConstraintsPass, + ) + + def inner(*args): + # TODO(zhxchen17) Don't generate a runtime graph on the fly. 
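+        # Build a throwaway graph that mirrors only the placeholders of the real
+        # graph, run the assertions pass over it, then call it on the flat args so
+        # the generated runtime asserts (range/equality checks) fire eagerly.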
+ _assertion_graph = torch.fx.GraphModule({}, torch.fx.Graph()) + for p in graph.nodes: + if p.op != "placeholder": + continue + new_p = _assertion_graph.graph.placeholder(p.name) + new_p.meta = p.meta + _assertion_graph.graph.output(()) + _assertion_graph_res = _AddRuntimeAssertionsForConstraintsPass( + range_constraints, + equality_constraints, + )(_assertion_graph) + assert _assertion_graph_res is not None + _assertion_graph = _assertion_graph_res.graph_module + _assertion_graph(*args) + + return inner + + +def register_dataclass_as_pytree_node( + cls: Type[Any], + flatten_fn: Optional[FlattenFunc] = None, + unflatten_fn: Optional[UnflattenFunc] = None, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, + return_none_fields: bool = False, +) -> None: + assert dataclasses.is_dataclass( + cls + ), f"Only dataclasses can be registered with this function: {cls}" + + serialized_type = f"{cls.__module__}.{cls.__qualname__}" + SERIALIZED_DATACLASS_TO_PYTHON_DATACLASS[serialized_type] = cls + + def default_flatten_fn(obj: Any) -> Tuple[List[Any], Context]: + flattened = [] + flat_names = [] + none_names = [] + for f in dataclasses.fields(obj): + name, val = f.name, getattr(obj, f.name) + if val is not None or return_none_fields: + flattened.append(val) + flat_names.append(name) + else: + none_names.append(name) + return flattened, (cls, flat_names, none_names) + + def default_unflatten_fn(values: Iterable[Any], context: Context) -> Any: + typ, flat_names, none_names = context + return typ(**dict(zip(flat_names, values)), **{k: None for k in none_names}) + + def default_to_dumpable_context(context: Context) -> DumpableContext: + return (serialized_type, context[1], context[2]) + + def default_from_dumpable_context(dumpable_context: DumpableContext) -> Context: + return ( + SERIALIZED_DATACLASS_TO_PYTHON_DATACLASS[dumpable_context[0]], + dumpable_context[1], + dumpable_context[2], + ) + + flatten_fn = flatten_fn if flatten_fn is not None else default_flatten_fn + unflatten_fn = unflatten_fn if unflatten_fn is not None else default_unflatten_fn + + if (to_dumpable_context is None) ^ (from_dumpable_context is None): + raise ValueError( + f"Both to_dumpable_context and from_dumpable_context for {cls} must " + "be None or registered." + ) + + to_dumpable_context = ( + to_dumpable_context + if to_dumpable_context is not None + else default_to_dumpable_context + ) + from_dumpable_context = ( + from_dumpable_context + if from_dumpable_context is not None + else default_from_dumpable_context + ) + + _register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + +def is_param(program: ExportedProgram, node: torch.fx.Node) -> bool: + """ + Checks if the given node is a parameter within the exported program + """ + + return node.name in program.graph_signature.inputs_to_parameters + + +def get_param( + program: ExportedProgram, + node: torch.fx.Node, +) -> Optional[torch.nn.Parameter]: + """ + Returns the parameter associated with the given node in the exported program. 
+ Returns None if the node is not a parameter within the exported program + """ + + if is_param(program, node): + parameter_name = program.graph_signature.inputs_to_parameters[node.name] + return program.state_dict[parameter_name] + + return None + + +def is_buffer(program: ExportedProgram, node: torch.fx.Node) -> bool: + """ + Checks if the given node is a buffer within the exported program + """ + + return node.name in program.graph_signature.inputs_to_buffers + + +def get_buffer( + program: ExportedProgram, + node: torch.fx.Node, +) -> Optional[torch.Tensor]: + """ + Returns the buffer associated with the given node in the exported program. + Returns None if the node is not a buffer within the exported program + """ + + if is_buffer(program, node): + buffer_name = program.graph_signature.inputs_to_buffers[node.name] + return program.state_dict[buffer_name] + + return None diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/verifier.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/verifier.py new file mode 100644 index 0000000000000000000000000000000000000000..225ac16344c626c9f2ffdbae50e683f286126b15 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/verifier.py @@ -0,0 +1,368 @@ +import inspect +import math +import operator +from collections.abc import Iterable +from typing import Any, Dict, final, List, Optional, Tuple, Type + +import torch +from torch._ops import HigherOrderOperator, OpOverload +from torch._subclasses.fake_tensor import FakeTensor +from torch.export.exported_program import ExportedProgram +from torch.export.graph_signature import ( + ExportGraphSignature, + InputKind, + SymIntArgument, + TensorArgument, +) +from torch.fx import GraphModule +from torch.fx.experimental.symbolic_shapes import SymBool, SymFloat, SymInt + + +class SpecViolationError(Exception): + pass + + +def is_functional(op: OpOverload) -> bool: + return not op._schema.is_mutable + + +def _check_has_fake_tensor(node: torch.fx.Node) -> None: + # TODO(angelayi): remove this in favor of _check_val + return _check_val(node) + + +def _check_val(node: torch.fx.Node) -> None: + def _check_correct_val(val): + if val is None: + return True + elif isinstance(val, (int, bool, str, float)): + return True + elif isinstance(val, (torch.memory_format, torch.dtype, torch.device, torch.layout)): + return True + elif isinstance(val, (FakeTensor, torch.Tensor)): # TODO(zhxchen17) Remove Tensor. 
+ return True + elif isinstance(val, (SymInt, SymFloat, SymBool)): + return True + elif isinstance(val, Iterable): + return all(_check_correct_val(x) for x in val) + return False + + def _no_returns(op): + if not isinstance(op, OpOverload): + return False + return len(op._schema.returns) == 0 + + if "val" not in node.meta: + if node.op == "call_function" and _no_returns(node.target): + return + raise SpecViolationError(f"Node.meta {node.name} is missing val field.") + + val = node.meta["val"] + if not _check_correct_val(val): + raise SpecViolationError(f"Node.meta {node.name} has invalid val field {val}") + + +class _VerifierMeta(type): + _registry: Dict[str, Type['Verifier']] = {} + + def __new__(metacls, name, bases, attrs): + if bases: + if "check" in attrs or "_check_graph_module" in attrs: + raise SyntaxError("Overriding method check is not allowed.") + assert "dialect" in attrs and attrs["dialect"] != "ATEN" + else: + assert "check" in attrs + assert "_check_graph_module" in attrs + assert attrs["dialect"] == "ATEN" + + assert isinstance(attrs["dialect"], str) + ret = type.__new__(metacls, name, bases, attrs) + metacls._registry[attrs["dialect"]] = ret # type: ignore[assignment] + return ret + + +class Verifier(metaclass=_VerifierMeta): + dialect = "ATEN" + + def allowed_builtin_ops(self) -> List: + return [ + operator.getitem, + operator.add, + operator.mul, + operator.sub, + operator.truediv, + operator.ge, + operator.le, + operator.gt, + operator.lt, + operator.eq, + operator.ne, + operator.floordiv, + operator.mod, + operator.and_, + operator.or_, + operator.not_, + operator.pow, + operator.neg, + operator.abs, + math.ceil, + math.floor, + ] + + def allowed_op_types(self) -> Tuple[Type[Any], ...]: + return (OpOverload, HigherOrderOperator) + + def allowed_getattr_types(self) -> Tuple[Type[Any], ...]: + return (torch.fx.GraphModule,) + + def check_valid_op(self, op): + pass + + def check_additional(self, gm: GraphModule) -> None: + """ + Additional checks that are specific to some dialects. + """ + pass + + @final + def check(self, ep: ExportedProgram) -> None: + if not isinstance(ep.graph_signature, ExportGraphSignature): + # TODO Enforce type checking in the constructor. + return + self._check_graph_module(ep.graph_module) + try: + _verify_exported_program_signature(ep) + except SpecViolationError as e: + # TODO Remove this branch. + if ep.dialect == "EDGE": # !!! Don't change this allowlist. !!! + pass + else: + raise e + + @final + def _check_graph_module(self, gm: torch.fx.GraphModule) -> None: + def _allowed_getattr_types() -> Tuple[Type[Any], ...]: + ret = self.allowed_getattr_types() + assert not any(t is object for t in ret) + return ret + + def _check_valid_op(op) -> None: + def _allowed_builtin_ops() -> List: + ret = self.allowed_builtin_ops() + assert all(inspect.isbuiltin(op) for op in ret) + return ret + + def _allowed_op_types() -> Tuple[Type[Any], ...]: + ret = self.allowed_op_types() + assert not any(t is object for t in ret) + return ret + + # TODO Remove this allowlist. 
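+            # torch.autograd.grad_mode.set_grad_enabled is let through as a plain
+            # torch function even though it is neither an OpOverload nor a builtin.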
+ _allowed_torch_functions = (torch.autograd.grad_mode.set_grad_enabled,) + + if not isinstance(op, _allowed_op_types()): + if op not in _allowed_builtin_ops() and op not in _allowed_torch_functions: + raise SpecViolationError( + f"Operator '{op}' is not an allowed operator type: {_allowed_op_types()}\n" + f"Valid builtin ops: {_allowed_builtin_ops()}" + f"Valid torch functions: {_allowed_torch_functions}" + ) + + if isinstance(op, OpOverload): + # All ops functional + if not is_functional(op): + raise SpecViolationError( + f"operator '{op}' is not functional" + ) + self.check_valid_op(op) + + for mod in gm.modules(): + if not isinstance(mod, torch.fx.GraphModule): + continue + + mod.graph.lint() + for node in mod.graph.nodes: + # TODO(T140410192): should have fake tensor for all dialects + if node.op in {"call_module", "call_method"}: + raise SpecViolationError( + f"call_module is not valid: got a class '{node.target}' ", + ) + + elif node.op == "call_function": + _check_val(node) + + _check_valid_op(node.target) + + elif node.op == "get_attr": + if not isinstance(node.target, str): + raise SpecViolationError( + f"Expected get_attr target to be string, but got {type(node.target)}" + ) + + attr = getattr(mod, node.target) + if isinstance(attr, torch.nn.Module): + def _is_type(name, ty): + return isinstance(getattr(attr, name, None), ty) + if type(attr).__name__ == "LoweredBackendModule": + if _is_type("backend_id", str) \ + and _is_type("processed_bytes", bytes) \ + and _is_type("compile_specs", list) \ + and hasattr(attr, "original_module"): + continue + else: + backend_id = getattr(attr, "backend_id", None) + processed_bytes = getattr(attr, "processed_bytes", None) + compile_specs = getattr(attr, "compile_specs", None) + raise SpecViolationError( + f"Invalid get_attr type {type(attr)}. \n" + f"LoweredBackendModule fields: " + f"backend_id(str) : {type(backend_id)}, " + f"processed_bytes(bytes) : {type(processed_bytes)}, " + f"compile_specs(list) : {type(compile_specs)}" + ) + + if not isinstance(attr, _allowed_getattr_types()): + raise SpecViolationError( + f"Invalid get_attr type {type(attr)}. \n" + f"Valid get_attr types: {_allowed_getattr_types()}" + ) + + + elif node.op == "placeholder": + _check_val(node) + # TODO(zhxchen17) + # elif node.op == "output": + # _check_flattened_outputs() + + self.check_additional(gm) + + +def _verify_exported_program_signature(exported_program) -> None: + # Check ExportedProgram signature matches + gs = exported_program.graph_signature + + # Check every node in the signature exists in the graph + input_node_names = [node.name for node in exported_program.graph.nodes if node.op == "placeholder"] + + if len(input_node_names) != len(gs.input_specs): + raise SpecViolationError( + f"Number of graph inputs ({len(input_node_names)}) " + f"does not match number of inputs in the graph signature ({len(gs.user_inputs)})" + ) + + for input_spec, node in zip(gs.input_specs, input_node_names): + if isinstance(input_spec.arg, (TensorArgument, SymIntArgument)): + if input_spec.arg.name != node: + raise SpecViolationError( + f"Input spec name {input_spec.arg.name} does not match node name {node}" + ) + + if input_spec.kind == InputKind.USER_INPUT: + continue + + elif input_spec.kind == InputKind.PARAMETER: + if not isinstance(input_spec.arg, TensorArgument): + raise SpecViolationError( + f"Parameter {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead." 
+                )
+            if input_spec.target is None:
+                raise SpecViolationError(
+                    f"InputSpec for {input_spec.name} has no target."
+                )
+
+            param = input_spec.target
+            if param not in exported_program.state_dict:
+                raise SpecViolationError(
+                    f"Parameter {param} is not in the state dict."
+                )
+
+            if not isinstance(exported_program.state_dict[param], torch.nn.Parameter):
+                raise SpecViolationError(
+                    f"State dict entry for parameter {param} is not an instance of torch.nn.Parameter."
+                )
+
+        elif input_spec.kind == InputKind.BUFFER:
+            if not isinstance(input_spec.arg, TensorArgument):
+                raise SpecViolationError(
+                    f"Buffer {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead."
+                )
+            if input_spec.target is None:
+                raise SpecViolationError(
+                    f"InputSpec for {input_spec.name} has no target."
+                )
+
+            buffer = input_spec.target
+            if buffer not in exported_program.state_dict:
+                raise SpecViolationError(
+                    f"Buffer {buffer} is not in the state dict."
+                )
+        elif input_spec.kind == InputKind.CONSTANT_TENSOR:
+            if not isinstance(input_spec.arg, TensorArgument):
+                raise SpecViolationError(
+                    f"Constant tensor {input_spec.name} is not a tensor argument. Found {input_spec.arg} instead."
+                )
+            if input_spec.target is None:
+                raise SpecViolationError(
+                    f"InputSpec for {input_spec.name} has no target."
+                )
+
+            tensor_const = input_spec.target
+            if tensor_const not in exported_program.tensor_constants:
+                raise SpecViolationError(
+                    f"Constant tensor {tensor_const} is not in the tensor constants dictionary."
+                )
+        else:
+            raise SpecViolationError(
+                f"Unknown InputKind {input_spec.kind}."
+            )
+
+    # Check outputs
+    output_node = list(exported_program.graph.nodes)[-1]
+    assert output_node.op == "output"
+    output_nodes = [arg.name for arg in output_node.args[0]]
+
+    if len(output_nodes) != len(gs.output_specs):
+        raise SpecViolationError(
+            f"Number of output nodes {len(output_nodes)} is different "
+            "than the number of outputs specified by the graph signature: \n"
+            f"Number of mutated buffers: {len(gs.buffers_to_mutate)}. \n"
+            f"Number of user outputs: {len(gs.user_outputs)}. \n"
+        )
+
+    end = len(gs.buffers_to_mutate) + len(gs.user_inputs_to_mutate)
+    mutate_nodes: List[str] = output_nodes[:end]
+    user_output_nodes = output_nodes[end:end + len(gs.user_outputs)]
+
+    for mutation_node in mutate_nodes:
+        if mutation_node in gs.buffers_to_mutate:
+            if gs.buffers_to_mutate[mutation_node] not in gs.buffers:
+                raise SpecViolationError(
+                    f"Buffer output {mutation_node} does not point to a buffer that exists. \n"
+                    f"Dict of buffers that are mutated, in order: {gs.buffers_to_mutate} \n"
+                    f"Buffer nodes available: {gs.buffers} \n"
+                )
+        elif mutation_node in gs.user_inputs_to_mutate:
+            if gs.user_inputs_to_mutate[mutation_node] not in gs.user_inputs:
+                raise SpecViolationError(
+                    f"User input output {mutation_node} does not point to a user input that exists. \n"
+                    f"Dict of user inputs that are mutated, in order: {gs.user_inputs_to_mutate} \n"
+                    f"User input nodes available: {gs.user_inputs} \n")
+        else:
+            raise SpecViolationError(
+                f"Mutation node {mutation_node} is neither a buffer nor a user input. 
" + f"Buffers to mutate: {gs.buffers_to_mutate}, User inputs to mutate: {gs.user_inputs_to_mutate}" + ) + + for user_output_node, user_output_name in zip(user_output_nodes, gs.user_outputs): + if user_output_node != user_output_name: + raise SpecViolationError( + f"User output {user_output_node} is not in the correct " + "order or is not found in the " + f"exported program's user_output list: {gs.user_outputs}. " + ) + + +def load_verifier(dialect: str) -> Optional[Type[Verifier]]: + if dialect == "ATEN": + return _VerifierMeta._registry.get(dialect) + return _VerifierMeta._registry[dialect] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_export/wrappers.py b/env-llmeval/lib/python3.10/site-packages/torch/_export/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..8f3d2873b8c70991d499eb120a79f2927def0f88 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_export/wrappers.py @@ -0,0 +1,102 @@ +from contextlib import contextmanager + +import torch +import torch._custom_ops +from torch._C import DispatchKey +from torch._higher_order_ops.utils import autograd_not_implemented +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree +from torch.utils import _pytree as pytree + + +_export_tracepoint = HigherOrderOperator("_export_tracepoint") + + +@_export_tracepoint.py_impl(ProxyTorchDispatchMode) +def export_tracepoint_dispatch_mode(mode, *args, **kwargs): + if not mode.enable_tracing: + return _export_tracepoint(*args, **kwargs) + p_args, p_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, (args, kwargs)) + proxy = mode.tracer.create_proxy( + "call_function", _export_tracepoint, p_args, p_kwargs + ) + return track_tensor_tree(args, proxy, constant=None, tracer=mode.tracer) + + +@_export_tracepoint.py_impl(FakeTensorMode) +def export_tracepoint_fake_tensor_mode(mode, *args, **kwargs): + with mode: + return args + + +@_export_tracepoint.py_functionalize_impl +def export_tracepoint_functional(ctx, *args, **kwargs): + unwrapped_args = ctx.unwrap_tensors(args) + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + + with ctx.redispatch_to_next(): + out = _export_tracepoint(*unwrapped_args, **unwrapped_kwargs) + return ctx.wrap_tensors(out) + + +_export_tracepoint.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(_export_tracepoint, deferred_error=True) +) + + +@_export_tracepoint.py_impl(DispatchKey.CPU) +def export_tracepoint_cpu(*args, **kwargs): + return args + + +def _wrap_submodule(mod, path, module_call_specs): + assert isinstance(mod, torch.nn.Module) + assert path != "" + submodule = mod + for name in path.split("."): + if not hasattr(submodule, name): + raise RuntimeError(f"Couldn't find submodule at path {path}") + submodule = getattr(submodule, name) + + def update_module_call_signatures(path, in_spec, out_spec): + assert path not in module_call_specs + module_call_specs[path] = {"in_spec": in_spec, "out_spec": out_spec} + + assert "forward" not in submodule.__dict__ + wrapped_forward = submodule.forward + + def check_flattened(flat_args): + for a in flat_args: + if not (isinstance(a, (torch.Tensor, str, int, float, bool)) or a is None): + raise AssertionError( + f"Only Tensors or scalars are supported as pytree flattened inputs, got: {a}" + ) + + def wrapper(self, *args, **kwargs): + flat_args, in_spec = pytree.tree_flatten((args, kwargs)) + check_flattened(flat_args) + flat_args = 
_export_tracepoint(*flat_args, kind="module_call_inputs", path=path) + args, kwargs = pytree.tree_unflatten(flat_args, in_spec) + res = wrapped_forward(*args, **kwargs) + flat_res, out_spec = pytree.tree_flatten(res) + check_flattened(flat_res) + flat_res = _export_tracepoint(*flat_res, kind="module_call_outputs", path=path) + update_module_call_signatures(path, in_spec, out_spec) + return pytree.tree_unflatten(flat_res, out_spec) + + submodule.forward = wrapper.__get__(submodule, type(submodule)) + return submodule + + +@contextmanager +def _wrap_submodules(f, preserve_signature, module_call_signatures): + tasks = [] + + try: + for path in preserve_signature: + tasks.append(_wrap_submodule(f, path, module_call_signatures)) + yield + finally: + for submodule in tasks: + del submodule.__dict__["forward"] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/contrib/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b04254e761da206b1bc07afd69f9eff712e6d7e8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..840b40ba1f2936a364051847569e2819427db368 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py b/env-llmeval/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..87c325948a8b111d42409140a5d1f8150342794c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py @@ -0,0 +1,142 @@ +import time +from collections import defaultdict +from functools import partial +from typing import DefaultDict + +import torch + + +# Unfortunately it doesn't seem as if there was any way to get TensorBoard to do +# anything without having TF installed, and so this file has a hard dependency on it +# as well. It really is a debugging tool, so it doesn't matter. 
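+#
+# Sketch of intended use (hypothetical paths; the executor-state accessor is a
+# TorchScript internal, so treat this as an assumption rather than a stable API):
+#
+#     fn = torch.jit.script(my_fn)
+#     fn(*example_inputs)  # run once so execution plans exist
+#     dump_tensorboard_summary(fn.get_debug_state(), logdir="/tmp/tb_logs")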
+try: + from tensorflow.core.util import event_pb2 + from tensorflow.core.framework import graph_pb2 + from tensorflow.python.summary.writer.writer import FileWriter +except ImportError: + raise ImportError("TensorBoard visualization of GraphExecutors requires having " + "TensorFlow installed") from None + + +def dump_tensorboard_summary(graph_executor, logdir): + with FileWriter(logdir) as w: + pb_graph = visualize(graph_executor) + evt = event_pb2.Event(wall_time=time.time(), graph_def=pb_graph.SerializeToString()) + w.add_event(evt) + + +def visualize(graph, name_prefix='', pb_graph=None, executors_it=None): + """Visualizes an independent graph, or a graph executor.""" + value_map = {} + pb_graph = pb_graph or graph_pb2.GraphDef() + + if isinstance(graph, torch._C.GraphExecutorState): + visualize_graph_executor(graph, name_prefix, pb_graph, + partial(visualize, pb_graph=pb_graph)) + return pb_graph + + # Set up an input node + input_node = pb_graph.node.add(op='input', name=name_prefix + 'input') + for i, value in enumerate(graph.param_node().outputs()): + value_map[value.unique()] = name_prefix + 'input:' + str(i) + + visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it) + + # Gather all outputs + return_node = pb_graph.node.add(op='output', name=name_prefix + 'output') + for value in graph.return_node().inputs(): + return_node.input.append(value_map[value.unique()]) + + return pb_graph + + +def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph): + """Append the state of a given GraphExecutor to the graph protobuf. + + Args: + state (GraphExecutor or GraphExecutorState): GraphExecutor to display. + name_prefix (str): Name prefix of the containing subgraph. + pb_graph (GraphDef): graph to append to. + inline_graph (Callable): a function that handles setting up a value_map, + so that some graphs in here can be inlined. This is necessary, because + this will simply be `visualize` for the top-level GraphExecutor, + or `inline_graph` for all nested ones. + + The signature should look like (Graph, name_prefix) -> (). + It will be called exactly once. + + The strategy is to embed all different configurations as independent subgraphs, + while inlining the original graph as the one that actually produces the values. + """ + if state.autograd_fallback_graph is not None: + visualize(graph=state.autograd_fallback_graph, + name_prefix=name_prefix + 'autograd_fallback/', + pb_graph=pb_graph, + executors_it=iter(state.autograd_fallback.executors())) + + for i, (arg_spec, plan) in enumerate(state.execution_plans.items()): + subgraph_name = name_prefix + f'plan{i}/' + + # Create a disconnected node that will keep information regarding the input + # types of this trace. This is unfortunately a bit too verbose to be included + # in the subgraph name. 
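+        # The INPUT_KIND node added below is deliberately disconnected: it has no
+        # inputs or outputs and only carries repr(arg_spec) as a string attribute.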
+ input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name) + input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii') + + visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors())) + + # Show gradient as an independent subgraph of this plan + if plan.grad_executor is not None: + grad_subgraph_name = subgraph_name + 'grad/' + visualize(plan.grad_executor, grad_subgraph_name, pb_graph) + + return inline_graph(state.graph, name_prefix + 'original/') + + +def visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it=None): + """Recursive part of visualize (basically skips setting up the input and output nodes).""" + def inline_graph(subgraph, name, node): + rec_value_map = {inp.unique(): value_map[val.unique()] + for inp, val in zip(subgraph.inputs(), node.inputs())} + visualize_rec(graph=subgraph, + value_map=rec_value_map, + name_prefix=name, + pb_graph=pb_graph) + for out, val in zip(subgraph.outputs(), node.outputs()): + value_map[val.unique()] = rec_value_map[out.unique()] + + op_id_counter: DefaultDict[str, int] = defaultdict(int) + + def name_for(node): + kind = node.kind()[node.kind().index('::') + 2:] + op_id_counter[kind] += 1 + return kind, name_prefix + kind + '_' + str(op_id_counter[kind]) + + def add_fusion_group(node): + op, name = name_for(node) + inline_graph(node.g('Subgraph'), name + '/', node) + + def add_graph_executor(node): + op, name = name_for(node) + if executors_it is None: + add_node(node) + else: + ge = next(executors_it) + visualize_graph_executor(ge, name + '/', pb_graph, + partial(inline_graph, node=node)) + + def add_node(node): + if node.kind() == 'prim::FusionGroup': + return add_fusion_group(node) + elif node.kind() == 'prim::GraphExecutor': + return add_graph_executor(node) + op, name = name_for(node) + pb_node = pb_graph.node.add(op=op, name=name) + for value in node.inputs(): + pb_node.input.append(value_map[value.unique()]) + # TODO: handle attrs + for i, value in enumerate(node.outputs()): + value_map[value.unique()] = name + ':' + str(i) + + for node in graph.nodes(): + add_node(node) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/func/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/func/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0f14ff503745115df800557aaa02840eaef45a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/func/__init__.py @@ -0,0 +1,14 @@ +from torch._functorch.eager_transforms import ( + grad_and_value, + vjp, + jvp, + jacrev, + jacfwd, + hessian, + functionalize, + linearize +) +from torch._functorch.apis import grad +from torch._functorch.functional_call import functional_call, stack_module_state +from torch._functorch.batch_norm_replacement import replace_all_batch_norm_modules_ +from torch._functorch.apis import vmap diff --git a/env-llmeval/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..863286b81d3204f86ca041b330edee83bbad60a1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/glog.cmake b/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/glog.cmake new file mode 100644 index 
0000000000000000000000000000000000000000..bb03e81f29e3afed43ba95260cc5c298be881f72
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/glog.cmake
@@ -0,0 +1,70 @@
+# ---[ glog
+
+# We will try to use the config mode first, and then manual find.
+find_package(glog CONFIG QUIET)
+if(NOT TARGET glog::glog)
+  find_package(glog MODULE QUIET)
+endif()
+
+if(TARGET glog::glog)
+  message(STATUS "Caffe2: Found glog with new-style glog target.")
+elseif(GLOG_FOUND)
+  message(
+      STATUS
+      "Caffe2: Found glog with old-style glog target. Glog never shipped "
+      "old style glog targets, so somewhere in your cmake path there might "
+      "be a custom Findglog.cmake file that got triggered. We will make a "
+      "best effort to create the new style glog target for you.")
+  add_library(glog::glog UNKNOWN IMPORTED)
+  set_property(
+      TARGET glog::glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARY})
+  set_property(
+      TARGET glog::glog PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+      ${GLOG_INCLUDE_DIR})
+else()
+  message(STATUS "Caffe2: Cannot find glog automatically. Using legacy find.")
+
+  # - Try to find Glog
+  #
+  # The following variables are optionally searched for defaults
+  #     GLOG_ROOT_DIR: Base directory where all GLOG components are found
+  #
+  # The following are set after configuration is done:
+  #     GLOG_FOUND
+  #     GLOG_INCLUDE_DIRS
+  #     GLOG_LIBRARIES
+  #     GLOG_LIBRARY_DIRS
+
+  include(FindPackageHandleStandardArgs)
+  set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog")
+  if(NOT WIN32)
+    find_path(GLOG_INCLUDE_DIR glog/logging.h
+        PATHS ${GLOG_ROOT_DIR})
+  endif()
+
+  find_library(GLOG_LIBRARY glog
+      PATHS ${GLOG_ROOT_DIR}
+      PATH_SUFFIXES lib lib64)
+
+  find_package_handle_standard_args(glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY)
+
+  if(GLOG_FOUND)
+    message(STATUS
+        "Caffe2: Found glog (include: ${GLOG_INCLUDE_DIR}, "
+        "library: ${GLOG_LIBRARY})")
+    add_library(glog::glog UNKNOWN IMPORTED)
+    set_property(
+        TARGET glog::glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARY})
+    set_property(
+        TARGET glog::glog PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+        ${GLOG_INCLUDE_DIR})
+  endif()
+endif()
+
+# After above, we should have the glog::glog target now.
+if(NOT TARGET glog::glog)
+  message(WARNING
+      "Caffe2: glog cannot be found. Depending on whether you are building "
+      "Caffe2 or a Caffe2 dependent library, the next warning / error will "
+      "give you more info.")
+endif()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake b/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..2149086394b4b3d207d4d031db6448012ec11fdd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake
@@ -0,0 +1,39 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
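+# (The version marker set below is read by the TensorpipeTargets.cmake file that
+# includes this one, and is cleared again at the bottom of this file.)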
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "tensorpipe_uv" for configuration "Release"
+set_property(TARGET tensorpipe_uv APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(tensorpipe_uv PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_uv )
+list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_uv "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a" )
+
+# Import target "tensorpipe" for configuration "Release"
+set_property(TARGET tensorpipe APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(tensorpipe PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe.a"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS tensorpipe )
+list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe "${_IMPORT_PREFIX}/lib64/libtensorpipe.a" )
+
+# Import target "tensorpipe_cuda" for configuration "Release"
+set_property(TARGET tensorpipe_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(tensorpipe_cuda PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_cuda )
+list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_cuda "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake b/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..31cc4794b7b83695f9bea33ffb48340cd5e89713
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake
@@ -0,0 +1,114 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5)
+   message(FATAL_ERROR "CMake >= 2.6.0 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.6...3.17)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_targetsDefined)
+set(_targetsNotDefined)
+set(_expectedTargets)
+foreach(_expectedTarget tensorpipe_uv tensorpipe tensorpipe_cuda)
+  list(APPEND _expectedTargets ${_expectedTarget})
+  if(NOT TARGET ${_expectedTarget})
+    list(APPEND _targetsNotDefined ${_expectedTarget})
+  endif()
+  if(TARGET ${_expectedTarget})
+    list(APPEND _targetsDefined ${_expectedTarget})
+  endif()
+endforeach()
+if("${_targetsDefined}" STREQUAL "${_expectedTargets}")
+  unset(_targetsDefined)
+  unset(_targetsNotDefined)
+  unset(_expectedTargets)
+  set(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT "${_targetsDefined}" STREQUAL "")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n")
+endif()
+unset(_targetsDefined)
+unset(_targetsNotDefined)
+unset(_expectedTargets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target tensorpipe_uv
+add_library(tensorpipe_uv STATIC IMPORTED)
+
+set_target_properties(tensorpipe_uv PROPERTIES
+  INTERFACE_LINK_LIBRARIES "\$;\$;\$"
+)
+
+# Create imported target tensorpipe
+add_library(tensorpipe STATIC IMPORTED)
+
+set_target_properties(tensorpipe PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include"
+  INTERFACE_LINK_LIBRARIES "\$"
+)
+
+# Create imported target tensorpipe_cuda
+add_library(tensorpipe_cuda STATIC IMPORTED)
+
+set_target_properties(tensorpipe_cuda PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "/usr/local/cuda/include"
+  INTERFACE_LINK_LIBRARIES "tensorpipe;/usr/local/cuda/lib64/libcudart.so"
+)
+
+if(CMAKE_VERSION VERSION_LESS 2.8.12)
+  message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.")
+endif()
+
+# Load information for each installed configuration.
+get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+file(GLOB CONFIG_FILES "${_DIR}/TensorpipeTargets-*.cmake")
+foreach(f ${CONFIG_FILES})
+  include(${f})
+endforeach()
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(target ${_IMPORT_CHECK_TARGETS} )
+  foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} )
+    if(NOT EXISTS "${file}" )
+      message(FATAL_ERROR "The imported target \"${target}\" references the file
+   \"${file}\"
+but this file does not exist. Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+    endif()
+  endforeach()
+  unset(_IMPORT_CHECK_FILES_FOR_${target})
+endforeach()
+unset(_IMPORT_CHECK_TARGETS)
+
+# This file does not depend on other imported targets which have
+# been exported from the same project but in a separate export set.
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
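The glog and Tensorpipe files above are the config-mode CMake packages shipped inside the wheel; downstream C++ extensions pick them up by pointing CMAKE_PREFIX_PATH at the torch install. A short sketch of how to locate that prefix from Python; the path checks are illustrative and depend on which components the wheel was built with:

    # torch.utils.cmake_prefix_path points at <torch install>/share/cmake,
    # the directory that contains the Caffe2 and Tensorpipe configs above.
    import pathlib
    import torch.utils

    prefix = pathlib.Path(torch.utils.cmake_prefix_path)
    print(prefix / "Caffe2" / "public" / "glog.cmake")
    print(prefix / "Tensorpipe" / "TensorpipeTargets.cmake")
    # Typical shell use when configuring an extension (illustrative):
    #   cmake -DCMAKE_PREFIX_PATH=$(python -c "import torch.utils; print(torch.utils.cmake_prefix_path)") ..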